2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
31 * Display PLLs used for driving outputs vary by platform. While some have
32 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33 * from a pool. In the latter scenario, it is possible that multiple pipes
34 * share a PLL if their configurations match.
36 * This file provides an abstraction over display PLLs. The function
37 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
38 * users of a PLL are tracked and that tracking is integrated with the atomic
39 * modset interface. During an atomic operation, required PLLs can be reserved
40 * for a given CRTC and encoder configuration by calling
41 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42 * with intel_release_shared_dplls().
43 * Changes to the users are first staged in the atomic state, and then made
44 * effective by calling intel_shared_dpll_swap_state() during the atomic
/*
 * Per-platform vtable: the platform's PLL table plus the hooks the
 * shared-DPLL framework calls (reserve/release, active-PLL update,
 * reference-clock refresh, state dumping).
 * NOTE(review): this extract has gaps (closing brace not visible);
 * code lines kept byte-identical.
 */
48 struct intel_dpll_mgr {
	/* platform's table of available DPLLs */
49 const struct dpll_info *dpll_info;
	/* reserve the PLL(s) needed by @crtc/@encoder in the atomic state */
51 bool (*get_dplls)(struct intel_atomic_state *state,
52 struct intel_crtc *crtc,
53 struct intel_encoder *encoder);
	/* drop the PLL reference(s) held by @crtc in the atomic state */
54 void (*put_dplls)(struct intel_atomic_state *state,
55 struct intel_crtc *crtc);
	/* optional (e.g. pch_pll_mgr omits it): switch the active PLL */
56 void (*update_active_dpll)(struct intel_atomic_state *state,
57 struct intel_crtc *crtc,
58 struct intel_encoder *encoder);
	/* optional: refresh cached reference-clock frequencies */
59 void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log a decoded view of @hw_state for debugging */
60 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
61 const struct intel_dpll_hw_state *hw_state);
/*
 * Copy the committed state of every shared DPLL into the caller's
 * @shared_dpll array, one entry per PLL index.
 * NOTE(review): extraction gaps — braces and the declaration of 'i'
 * are not visible here; code lines kept byte-identical.
 */
65 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
66 struct intel_shared_dpll_state *shared_dpll)
70 /* Copy shared dpll state */
71 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
72 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
74 shared_dpll[i] = pll->state;
/*
 * Return the atomic state's shadow copy of all shared-DPLL state,
 * duplicating it from the committed state on first use (dpll_set
 * latches that the copy has been made).  Warns if called without
 * connection_mutex held.
 */
78 static struct intel_shared_dpll_state *
79 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
81 struct intel_atomic_state *state = to_intel_atomic_state(s);
83 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
85 if (!state->dpll_set) {
86 state->dpll_set = true;
	/* NOTE(review): the second-argument line of this call is missing
	 * from this extract. */
88 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
92 return state->shared_dpll;
96 * intel_get_shared_dpll_by_id - get a DPLL given its id
97 * @dev_priv: i915 device instance
101 * A pointer to the DPLL with @id
103 struct intel_shared_dpll *
104 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
105 enum intel_dpll_id id)
	/* Direct index into the device's shared-DPLL array; no bounds check. */
107 return &dev_priv->dpll.shared_dplls[id];
111 * intel_get_shared_dpll_id - get the id of a DPLL
112 * @dev_priv: i915 device instance
119 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
120 struct intel_shared_dpll *pll)
	/* Recover the index from the pointer's offset within the array. */
122 long pll_idx = pll - dev_priv->dpll.shared_dplls;
	/* WARN on an out-of-range pointer.  NOTE(review): the lower-bound
	 * condition and the return statements are missing from this extract. */
124 if (drm_WARN_ON(&dev_priv->drm,
126 pll_idx >= dev_priv->dpll.num_shared_dpll))
/*
 * Check that @pll's live hardware enable state matches the expected
 * @state, warning (I915_STATE_WARN) on mismatch, or drm_WARN on NULL.
 * NOTE(review): the 'bool state' parameter line and the 'cur_state'
 * declaration are missing from this extract.
 */
133 void assert_shared_dpll(struct drm_i915_private *dev_priv,
134 struct intel_shared_dpll *pll,
138 struct intel_dpll_hw_state hw_state;
140 if (drm_WARN(&dev_priv->drm, !pll,
141 "asserting DPLL %s with no DPLL\n", onoff(state)))
	/* Read back the live hardware state via the PLL's own hook. */
144 cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
145 I915_STATE_WARN(cur_state != state,
146 "%s assertion failure (expected %s, current %s)\n",
147 pll->info->name, onoff(state), onoff(cur_state));
/*
 * Select the enable register for a combo-PHY PLL: EHL routes DPLL4
 * through the MG PLL 1 enable register; everything else uses the
 * per-id CNL enable register.
 */
151 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
152 struct intel_shared_dpll *pll)
155 if (IS_ELKHARTLAKE(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
156 return MG_PLL_ENABLE(0);
158 return CNL_DPLL_ENABLE(pll->info->id);
163 * intel_prepare_shared_dpll - call a dpll's prepare hook
164 * @crtc_state: CRTC, and its state, which has a shared dpll
166 * This calls the PLL's prepare hook if it has one and if the PLL is not
167 * already enabled. The prepare hook is platform specific.
169 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
171 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
172 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
173 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	/* A driver bug if no PLL was reserved for this crtc_state. */
175 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
178 mutex_lock(&dev_priv->dpll.lock);
	/* A reserved PLL must have at least one CRTC in its state mask. */
179 drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
	/* Only prepare while no CRTC is actively using the PLL. */
180 if (!pll->active_mask) {
181 drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
182 drm_WARN_ON(&dev_priv->drm, pll->on);
183 assert_shared_dpll_disabled(dev_priv, pll);
185 pll->info->funcs->prepare(dev_priv, pll);
187 mutex_unlock(&dev_priv->dpll.lock);
191 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
192 * @crtc_state: CRTC, and its state, which has a shared DPLL
194 * Enable the shared DPLL used by @crtc.
196 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
198 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
199 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
200 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
201 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
202 unsigned int old_mask;
204 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
207 mutex_lock(&dev_priv->dpll.lock);
208 old_mask = pll->active_mask;
	/* The CRTC must hold a reservation and must not already be active. */
210 if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
211 drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
214 pll->active_mask |= crtc_mask;
216 drm_dbg_kms(&dev_priv->drm,
217 "enable %s (active %x, on? %d) for crtc %d\n",
218 pll->info->name, pll->active_mask, pll->on,
	/* Another CRTC already enabled the PLL: just sanity-check it. */
222 drm_WARN_ON(&dev_priv->drm, !pll->on);
223 assert_shared_dpll_enabled(dev_priv, pll);
	/* First active user: actually turn the PLL on. */
226 drm_WARN_ON(&dev_priv->drm, pll->on);
228 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
229 pll->info->funcs->enable(dev_priv, pll);
	/* NOTE(review): lines between 229 and 233 (e.g. marking the PLL on,
	 * branch/label structure) are missing from this extract. */
233 mutex_unlock(&dev_priv->dpll.lock);
237 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
238 * @crtc_state: CRTC, and its state, which has a shared DPLL
240 * Disable the shared DPLL used by @crtc.
242 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
244 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
245 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
246 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
247 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
249 /* PCH only available on ILK+ */
250 if (INTEL_GEN(dev_priv) < 5)
256 mutex_lock(&dev_priv->dpll.lock);
	/* Bail if this CRTC never marked itself active on the PLL. */
257 if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
260 drm_dbg_kms(&dev_priv->drm,
261 "disable %s (active %x, on? %d) for crtc %d\n",
262 pll->info->name, pll->active_mask, pll->on,
265 assert_shared_dpll_enabled(dev_priv, pll);
266 drm_WARN_ON(&dev_priv->drm, !pll->on);
	/* Drop this CRTC; only power the PLL down when no users remain. */
268 pll->active_mask &= ~crtc_mask;
269 if (pll->active_mask)
272 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
273 pll->info->funcs->disable(dev_priv, pll);
277 mutex_unlock(&dev_priv->dpll.lock);
/*
 * Search @dpll_mask for a PLL whose staged hw_state matches @pll_state
 * (so the CRTC can share it), falling back to an unused PLL.
 * NOTE(review): several lines are missing from this extract (the
 * unused_pll bookkeeping, return statements and closing braces).
 */
280 static struct intel_shared_dpll *
281 intel_find_shared_dpll(struct intel_atomic_state *state,
282 const struct intel_crtc *crtc,
283 const struct intel_dpll_hw_state *pll_state,
284 unsigned long dpll_mask)
286 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
287 struct intel_shared_dpll *pll, *unused_pll = NULL;
288 struct intel_shared_dpll_state *shared_dpll;
289 enum intel_dpll_id i;
291 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
293 drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
295 for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
296 pll = &dev_priv->dpll.shared_dplls[i];
298 /* Only want to check enabled timings first */
299 if (shared_dpll[i].crtc_mask == 0) {
	/* An exact hw_state match means the PLL can be shared as-is. */
305 if (memcmp(pll_state,
306 &shared_dpll[i].hw_state,
307 sizeof(*pll_state)) == 0) {
308 drm_dbg_kms(&dev_priv->drm,
309 "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
310 crtc->base.base.id, crtc->base.name,
312 shared_dpll[i].crtc_mask,
318 /* Ok no matching timings, maybe there's a free one? */
320 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
321 crtc->base.base.id, crtc->base.name,
322 unused_pll->info->name);
/*
 * Stage a reference from @crtc's pipe onto @pll in the atomic state.
 * The first referencing pipe also installs @pll_state as the PLL's
 * staged hardware state.
 */
330 intel_reference_shared_dpll(struct intel_atomic_state *state,
331 const struct intel_crtc *crtc,
332 const struct intel_shared_dpll *pll,
333 const struct intel_dpll_hw_state *pll_state)
335 struct drm_i915_private *i915 = to_i915(state->base.dev);
336 struct intel_shared_dpll_state *shared_dpll;
337 const enum intel_dpll_id id = pll->info->id;
339 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
	/* First user installs the hw state; later users must match it. */
341 if (shared_dpll[id].crtc_mask == 0)
342 shared_dpll[id].hw_state = *pll_state;
344 drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
345 pipe_name(crtc->pipe));
347 shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
/* Drop @crtc's pipe from @pll's staged crtc_mask in the atomic state. */
350 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
351 const struct intel_crtc *crtc,
352 const struct intel_shared_dpll *pll)
354 struct intel_shared_dpll_state *shared_dpll;
356 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
357 shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
/*
 * Default .put_dplls hook: clear the new state's PLL pointer and, if
 * the old state held one, unreference it.
 */
360 static void intel_put_dpll(struct intel_atomic_state *state,
361 struct intel_crtc *crtc)
363 const struct intel_crtc_state *old_crtc_state =
364 intel_atomic_get_old_crtc_state(state, crtc);
365 struct intel_crtc_state *new_crtc_state =
366 intel_atomic_get_new_crtc_state(state, crtc);
368 new_crtc_state->shared_dpll = NULL;
	/* Nothing to release if the old state never had a PLL. */
370 if (!old_crtc_state->shared_dpll)
373 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
377 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
378 * @state: atomic state
380 * This is the dpll version of drm_atomic_helper_swap_state() since the
381 * helper does not handle driver-specific global state.
383 * For consistency with atomic helpers this function does a complete swap,
384 * i.e. it also puts the current state into @state, even though there is no
385 * need for that at this moment.
387 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
389 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
390 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
391 enum intel_dpll_id i;
	/* Nothing staged: intel_atomic_get_shared_dpll_state() never ran. */
393 if (!state->dpll_set)
396 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
397 struct intel_shared_dpll *pll =
398 &dev_priv->dpll.shared_dplls[i];
400 swap(pll->state, shared_dpll[i]);
/*
 * Read back the PCH DPLL registers (DPLL/FP0/FP1) into @hw_state and
 * return whether the VCO enable bit is set.
 * NOTE(review): the 'u32 val' declaration and the wakeref-failure
 * early-return lines are missing from this extract.
 */
404 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
405 struct intel_shared_dpll *pll,
406 struct intel_dpll_hw_state *hw_state)
408 const enum intel_dpll_id id = pll->info->id;
409 intel_wakeref_t wakeref;
412 wakeref = intel_display_power_get_if_enabled(dev_priv,
413 POWER_DOMAIN_DISPLAY_CORE);
417 val = intel_de_read(dev_priv, PCH_DPLL(id));
418 hw_state->dpll = val;
419 hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
420 hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
422 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
424 return val & DPLL_VCO_ENABLE;
/* Program the FP0/FP1 divisor registers before the PLL is enabled. */
427 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
428 struct intel_shared_dpll *pll)
430 const enum intel_dpll_id id = pll->info->id;
432 intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
433 intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
/*
 * State-checker helper: warn unless some PCH reference clock source
 * (SSC, non-spread or superspread) is enabled in PCH_DREF_CONTROL.
 * Only expected on IBX/CPT PCHs (first WARN).
 */
436 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
441 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
443 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
444 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
445 DREF_SUPERSPREAD_SOURCE_MASK));
446 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/*
 * Enable a PCH DPLL.  The refclk must already be running; the DPLL
 * value is written twice with posting reads because the pixel
 * multiplier only latches once the PLL is enabled and stable.
 * NOTE(review): the wait/delay lines between the two writes are
 * missing from this extract.
 */
449 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
450 struct intel_shared_dpll *pll)
452 const enum intel_dpll_id id = pll->info->id;
454 /* PCH refclock must be enabled first */
455 ibx_assert_pch_refclk_enabled(dev_priv);
457 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
459 /* Wait for the clocks to stabilize. */
460 intel_de_posting_read(dev_priv, PCH_DPLL(id));
463 /* The pixel multiplier can only be updated once the
464 * DPLL is enabled and the clocks are stable.
468 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
469 intel_de_posting_read(dev_priv, PCH_DPLL(id));
/* Disable a PCH DPLL by zeroing its control register. */
473 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
474 struct intel_shared_dpll *pll)
476 const enum intel_dpll_id id = pll->info->id;
478 intel_de_write(dev_priv, PCH_DPLL(id), 0);
479 intel_de_posting_read(dev_priv, PCH_DPLL(id));
/*
 * .get_dplls for PCH platforms: IBX has a fixed pipe->PLL mapping,
 * other PCHs pick PLL A/B via intel_find_shared_dpll().  On success
 * the PLL is referenced in the atomic state and stored in
 * crtc_state->shared_dpll.
 * NOTE(review): the NULL check on 'pll' and the return statements are
 * missing from this extract.
 */
483 static bool ibx_get_dpll(struct intel_atomic_state *state,
484 struct intel_crtc *crtc,
485 struct intel_encoder *encoder)
487 struct intel_crtc_state *crtc_state =
488 intel_atomic_get_new_crtc_state(state, crtc);
489 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
490 struct intel_shared_dpll *pll;
491 enum intel_dpll_id i;
493 if (HAS_PCH_IBX(dev_priv)) {
494 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
495 i = (enum intel_dpll_id) crtc->pipe;
496 pll = &dev_priv->dpll.shared_dplls[i];
498 drm_dbg_kms(&dev_priv->drm,
499 "[CRTC:%d:%s] using pre-allocated %s\n",
500 crtc->base.base.id, crtc->base.name,
503 pll = intel_find_shared_dpll(state, crtc,
504 &crtc_state->dpll_hw_state,
505 BIT(DPLL_ID_PCH_PLL_B) |
506 BIT(DPLL_ID_PCH_PLL_A));
512 /* reference the pll */
513 intel_reference_shared_dpll(state, crtc,
514 pll, &crtc_state->dpll_hw_state);
516 crtc_state->shared_dpll = pll;
/* Log the IBX PLL register snapshot (dpll/dpll_md/fp0/fp1). */
521 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
522 const struct intel_dpll_hw_state *hw_state)
524 drm_dbg_kms(&dev_priv->drm,
525 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
526 "fp0: 0x%x, fp1: 0x%x\n",
	/* NOTE(review): the argument lines of this call are missing here. */
533 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
534 .prepare = ibx_pch_dpll_prepare,
535 .enable = ibx_pch_dpll_enable,
536 .disable = ibx_pch_dpll_disable,
537 .get_hw_state = ibx_pch_dpll_get_hw_state,
540 static const struct dpll_info pch_plls[] = {
541 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
542 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
546 static const struct intel_dpll_mgr pch_pll_mgr = {
547 .dpll_info = pch_plls,
548 .get_dplls = ibx_get_dpll,
549 .put_dplls = intel_put_dpll,
550 .dump_hw_state = ibx_dump_hw_state,
/* Enable a HSW WRPLL by programming its control register. */
553 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
554 struct intel_shared_dpll *pll)
556 const enum intel_dpll_id id = pll->info->id;
558 intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
559 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/* Enable the HSW SPLL by programming its (single) control register. */
563 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
564 struct intel_shared_dpll *pll)
566 intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
567 intel_de_posting_read(dev_priv, SPLL_CTL);
/*
 * Disable a WRPLL and, if this PLL was keeping the PCH SSC reference
 * alive (pch_ssc_use), reconfigure the PCH reference clock.
 */
571 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
572 struct intel_shared_dpll *pll)
574 const enum intel_dpll_id id = pll->info->id;
577 val = intel_de_read(dev_priv, WRPLL_CTL(id));
578 intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
579 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
582 * Try to set up the PCH reference clock once all DPLLs
583 * that depend on it have been shut down.
585 if (dev_priv->pch_ssc_use & BIT(id))
586 intel_init_pch_refclk(dev_priv);
/*
 * Disable the SPLL; like the WRPLL path, re-init the PCH refclk when
 * this PLL was the reason SSC was kept on.
 */
589 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
590 struct intel_shared_dpll *pll)
592 enum intel_dpll_id id = pll->info->id;
595 val = intel_de_read(dev_priv, SPLL_CTL);
596 intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
597 intel_de_posting_read(dev_priv, SPLL_CTL);
600 * Try to set up the PCH reference clock once all DPLLs
601 * that depend on it have been shut down.
603 if (dev_priv->pch_ssc_use & BIT(id))
604 intel_init_pch_refclk(dev_priv);
/*
 * Read WRPLL_CTL into @hw_state and return its enable bit.  Needs the
 * display core power domain.
 * NOTE(review): the 'u32 val' declaration and wakeref-failure
 * early-return lines are missing from this extract.
 */
607 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
608 struct intel_shared_dpll *pll,
609 struct intel_dpll_hw_state *hw_state)
611 const enum intel_dpll_id id = pll->info->id;
612 intel_wakeref_t wakeref;
615 wakeref = intel_display_power_get_if_enabled(dev_priv,
616 POWER_DOMAIN_DISPLAY_CORE);
620 val = intel_de_read(dev_priv, WRPLL_CTL(id));
621 hw_state->wrpll = val;
623 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
625 return val & WRPLL_PLL_ENABLE;
/*
 * Read SPLL_CTL into @hw_state and return its enable bit.  Needs the
 * display core power domain; failure-path lines are not visible in
 * this extract.
 */
628 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
629 struct intel_shared_dpll *pll,
630 struct intel_dpll_hw_state *hw_state)
632 intel_wakeref_t wakeref;
635 wakeref = intel_display_power_get_if_enabled(dev_priv,
636 POWER_DOMAIN_DISPLAY_CORE);
640 val = intel_de_read(dev_priv, SPLL_CTL);
641 hw_state->spll = val;
643 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
645 return val & SPLL_PLL_ENABLE;
649 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
655 /* Constraints for PLL good behavior */
661 struct hsw_wrpll_rnp {
665 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
/*
 * Candidate-divider selection for the HSW WRPLL search: compare the
 * (r2, n2, p) candidate against the current best using a PPM budget.
 * The cross-multiplied forms avoid division: a/b are the scaled
 * budgets, diff/diff_best the absolute frequency errors.
 * NOTE(review): the best->p==0 early-out, the computation of 'c' and
 * the assignments into *best are missing from this extract.
 */
739 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
740 unsigned int r2, unsigned int n2,
742 struct hsw_wrpll_rnp *best)
744 u64 a, b, c, d, diff, diff_best;
746 /* No best (r,n,p) yet */
755 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
759 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
762 * and we would like delta <= budget.
764 * If the discrepancy is above the PPM-based budget, always prefer to
765 * improve upon the previous solution. However, if you're within the
766 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
768 a = freq2k * budget * p * r2;
769 b = freq2k * budget * best->p * best->r2;
770 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
771 diff_best = abs_diff(freq2k * best->p * best->r2,
772 LC_FREQ_2K * best->n2);
774 d = 1000000 * diff_best;
776 if (a < c && b < d) {
777 /* If both are above the budget, pick the closer */
778 if (best->p * best->r2 * diff < p * r2 * diff_best) {
783 } else if (a >= c && b < d) {
784 /* If A is below the threshold but B is above it? Update. */
788 } else if (a >= c && b >= d) {
789 /* Both are below the limit, so pick the higher n2/(r2*r2) */
790 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
796 /* Otherwise a < c && b >= d, do nothing */
/*
 * Exhaustively search (r2, n2, p) dividers so the WRPLL output matches
 * @clock within the PPM budget; results returned via the out-params.
 * freq2k is the target in 2 kHz units (clock / 100 with clock in Hz).
 * NOTE(review): several lines are missing from this extract (the
 * 5.4 GHz special-case body, loop increments, final out-param stores).
 */
800 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
801 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
805 struct hsw_wrpll_rnp best = { 0, 0, 0 };
808 freq2k = clock / 100;
810 budget = hsw_wrpll_get_budget_for_freq(clock);
812 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
813 * and directly pass the LC PLL to it. */
814 if (freq2k == 5400000) {
822 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
825 * We want R so that REF_MIN <= Ref <= REF_MAX.
826 * Injecting R2 = 2 * R gives:
827 * REF_MAX * r2 > LC_FREQ * 2 and
828 * REF_MIN * r2 < LC_FREQ * 2
830 * Which means the desired boundaries for r2 are:
831 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
834 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
835 r2 <= LC_FREQ * 2 / REF_MIN;
839 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
841 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
842 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
843 * VCO_MAX * r2 > n2 * LC_FREQ and
844 * VCO_MIN * r2 < n2 * LC_FREQ)
846 * Which means the desired boundaries for n2 are:
847 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
849 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
850 n2 <= VCO_MAX * r2 / LC_FREQ;
853 for (p = P_MIN; p <= P_MAX; p += P_INC)
854 hsw_wrpll_update_rnp(freq2k, budget,
/*
 * Compute WRPLL dividers for the CRTC's port clock, record the
 * resulting WRPLL_CTL value in dpll_hw_state, and find a matching or
 * free WRPLL (1 or 2).
 * NOTE(review): the 'u32 val' declaration and the trailing return are
 * not visible in this extract.
 */
864 static struct intel_shared_dpll *
865 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
866 struct intel_crtc *crtc)
868 struct intel_crtc_state *crtc_state =
869 intel_atomic_get_new_crtc_state(state, crtc);
870 struct intel_shared_dpll *pll;
872 unsigned int p, n2, r2;
874 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
876 val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
877 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
878 WRPLL_DIVIDER_POST(p);
880 crtc_state->dpll_hw_state.wrpll = val;
882 pll = intel_find_shared_dpll(state, crtc,
883 &crtc_state->dpll_hw_state,
884 BIT(DPLL_ID_WRPLL2) |
885 BIT(DPLL_ID_WRPLL1));
/*
 * Decode a programmed WRPLL_CTL value back into a frequency: pick the
 * reference clock from the REF field, then apply the R/N/P dividers
 * (R and P carry a fixed-point portion, hence the /10 and *2).
 * NOTE(review): declarations and several case/break lines are missing
 * from this extract.
 */
893 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
894 const struct intel_shared_dpll *pll)
898 u32 wrpll = pll->state.hw_state.wrpll;
900 switch (wrpll & WRPLL_REF_MASK) {
901 case WRPLL_REF_SPECIAL_HSW:
902 /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
903 if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
904 refclk = dev_priv->dpll.ref_clks.nssc;
908 case WRPLL_REF_PCH_SSC:
910 * We could calculate spread here, but our checking
911 * code only cares about 5% accuracy, and spread is a max of
914 refclk = dev_priv->dpll.ref_clks.ssc;
916 case WRPLL_REF_LCPLL:
924 r = wrpll & WRPLL_DIVIDER_REF_MASK;
925 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
926 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
928 /* Convert to KHz, p & r have a fixed point portion */
929 return (refclk * n / 10) / (p * r) * 2;
/*
 * Map a DP port clock onto one of the fixed LCPLL ids (810/1350/2700);
 * an invalid clock is logged.
 * NOTE(review): the switch/case labels and return lines are missing
 * from this extract.
 */
932 static struct intel_shared_dpll *
933 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
935 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
936 struct intel_shared_dpll *pll;
937 enum intel_dpll_id pll_id;
938 int clock = crtc_state->port_clock;
942 pll_id = DPLL_ID_LCPLL_810;
945 pll_id = DPLL_ID_LCPLL_1350;
948 pll_id = DPLL_ID_LCPLL_2700;
951 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
956 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
/*
 * Decode a fixed LCPLL id into a frequency; returns link_clock * 2.
 * NOTE(review): the link_clock assignments and break lines are missing
 * from this extract.
 */
964 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
965 const struct intel_shared_dpll *pll)
969 switch (pll->info->id) {
970 case DPLL_ID_LCPLL_810:
973 case DPLL_ID_LCPLL_1350:
976 case DPLL_ID_LCPLL_2700:
980 drm_WARN(&i915->drm, 1, "bad port clock sel\n")
984 return link_clock * 2;
/*
 * Analog (CRT) path — see hsw_get_dpll(): only a port clock of
 * 2 * 135000 is valid; program SPLL_CTL for 1350 MHz and look up the
 * SPLL via intel_find_shared_dpll().
 * NOTE(review): trailing argument/return lines are missing from this
 * extract.
 */
987 static struct intel_shared_dpll *
988 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
989 struct intel_crtc *crtc)
991 struct intel_crtc_state *crtc_state =
992 intel_atomic_get_new_crtc_state(state, crtc)
994 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
997 crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
1000 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
/*
 * Decode the SPLL frequency-select field; returns link_clock * 2.
 * NOTE(review): the 810 MHz assignment and break lines are missing
 * from this extract.
 */
1004 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1005 const struct intel_shared_dpll *pll)
1009 switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
1010 case SPLL_FREQ_810MHz:
1013 case SPLL_FREQ_1350MHz:
1014 link_clock = 135000;
1016 case SPLL_FREQ_2700MHz:
1017 link_clock = 270000;
1020 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1024 return link_clock * 2;
/*
 * .get_dplls for HSW/BDW: route by output type — WRPLL for HDMI,
 * LCPLL for DP, SPLL for analog — then reference the chosen PLL in
 * the atomic state.
 * NOTE(review): the fallback branch and NULL check on 'pll' are
 * missing from this extract.
 */
1027 static bool hsw_get_dpll(struct intel_atomic_state *state,
1028 struct intel_crtc *crtc,
1029 struct intel_encoder *encoder)
1031 struct intel_crtc_state *crtc_state =
1032 intel_atomic_get_new_crtc_state(state, crtc);
1033 struct intel_shared_dpll *pll;
	/* Start from a clean slate; the helpers fill in only their field. */
1035 memset(&crtc_state->dpll_hw_state, 0,
1036 sizeof(crtc_state->dpll_hw_state));
1038 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1039 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1040 else if (intel_crtc_has_dp_encoder(crtc_state))
1041 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1042 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1043 pll = hsw_ddi_spll_get_dpll(state, crtc);
1050 intel_reference_shared_dpll(state, crtc,
1051 pll, &crtc_state->dpll_hw_state);
1053 crtc_state->shared_dpll = pll;
/*
 * Cache the HSW reference clocks: SSC is 135000; non-SSC is selected
 * by fuse strap between 24000 and 135000.
 */
1058 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1060 i915->dpll.ref_clks.ssc = 135000;
1061 /* Non-SSC is only used on non-ULT HSW. */
1062 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1063 i915->dpll.ref_clks.nssc = 24000;
1065 i915->dpll.ref_clks.nssc = 135000;
/* Log the HSW PLL state (wrpll + spll control values). */
1068 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1069 const struct intel_dpll_hw_state *hw_state)
1071 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1072 hw_state->wrpll, hw_state->spll);
1075 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1076 .enable = hsw_ddi_wrpll_enable,
1077 .disable = hsw_ddi_wrpll_disable,
1078 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
1079 .get_freq = hsw_ddi_wrpll_get_freq,
1082 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1083 .enable = hsw_ddi_spll_enable,
1084 .disable = hsw_ddi_spll_disable,
1085 .get_hw_state = hsw_ddi_spll_get_hw_state,
1086 .get_freq = hsw_ddi_spll_get_freq,
1089 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1090 struct intel_shared_dpll *pll)
1094 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1095 struct intel_shared_dpll *pll)
1099 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1100 struct intel_shared_dpll *pll,
1101 struct intel_dpll_hw_state *hw_state)
1106 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1107 .enable = hsw_ddi_lcpll_enable,
1108 .disable = hsw_ddi_lcpll_disable,
1109 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
1110 .get_freq = hsw_ddi_lcpll_get_freq,
1113 static const struct dpll_info hsw_plls[] = {
1114 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1115 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1116 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1117 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1118 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1119 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1123 static const struct intel_dpll_mgr hsw_pll_mgr = {
1124 .dpll_info = hsw_plls,
1125 .get_dplls = hsw_get_dpll,
1126 .put_dplls = intel_put_dpll,
1127 .update_ref_clks = hsw_update_dpll_ref_clks,
1128 .dump_hw_state = hsw_dump_hw_state,
1131 struct skl_dpll_regs {
1132 i915_reg_t ctl, cfgcr1, cfgcr2;
1135 /* this array is indexed by the *shared* pll id */
1136 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1140 /* DPLL 0 doesn't support HDMI mode */
1145 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1146 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1150 .ctl = WRPLL_CTL(0),
1151 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1152 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1156 .ctl = WRPLL_CTL(1),
1157 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1158 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
/*
 * Read-modify-write this PLL's per-id field (HDMI mode, SSC, link
 * rate) in the shared DPLL_CTRL1 register; each PLL occupies 6 bits
 * (hence the id * 6 shift).
 */
1162 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1163 struct intel_shared_dpll *pll)
1165 const enum intel_dpll_id id = pll->info->id;
1168 val = intel_de_read(dev_priv, DPLL_CTRL1);
1170 val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1171 DPLL_CTRL1_SSC(id) |
1172 DPLL_CTRL1_LINK_RATE_MASK(id));
1173 val |= pll->state.hw_state.ctrl1 << (id * 6);
1175 intel_de_write(dev_priv, DPLL_CTRL1, val);
1176 intel_de_posting_read(dev_priv, DPLL_CTRL1);
/*
 * Enable a SKL DPLL: program CTRL1 and CFGCR1/2, set the enable bit
 * (always bit 31) and wait up to 5 ms for lock.
 */
1179 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1180 struct intel_shared_dpll *pll)
1182 const struct skl_dpll_regs *regs = skl_dpll_regs;
1183 const enum intel_dpll_id id = pll->info->id;
1185 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1187 intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1188 intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1189 intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1190 intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1192 /* the enable bit is always bit 31 */
1193 intel_de_write(dev_priv, regs[id].ctl,
1194 intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
1196 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1197 drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
/*
 * DPLL0 stays enabled because it drives CDCLK (see
 * skl_ddi_dpll0_get_hw_state); only CTRL1 needs programming here.
 */
1200 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1201 struct intel_shared_dpll *pll)
1203 skl_ddi_pll_write_ctrl1(dev_priv, pll);
/* Disable a SKL DPLL by clearing its enable bit (always bit 31). */
1206 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1207 struct intel_shared_dpll *pll)
1209 const struct skl_dpll_regs *regs = skl_dpll_regs;
1210 const enum intel_dpll_id id = pll->info->id;
1212 /* the enable bit is always bit 31 */
1213 intel_de_write(dev_priv, regs[id].ctl,
1214 intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1215 intel_de_posting_read(dev_priv, regs[id].ctl);
1218 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1219 struct intel_shared_dpll *pll)
/*
 * Read back a SKL DPLL's state: enable bit from its ctl register, the
 * per-id 6-bit field from DPLL_CTRL1, and CFGCR1/2 only when HDMI mode
 * is set (they hold stale values otherwise).
 * NOTE(review): 'ret'/'val' declarations, early-outs and the final
 * return are missing from this extract.
 */
1223 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1224 struct intel_shared_dpll *pll,
1225 struct intel_dpll_hw_state *hw_state)
1228 const struct skl_dpll_regs *regs = skl_dpll_regs;
1229 const enum intel_dpll_id id = pll->info->id;
1230 intel_wakeref_t wakeref;
1233 wakeref = intel_display_power_get_if_enabled(dev_priv,
1234 POWER_DOMAIN_DISPLAY_CORE);
1240 val = intel_de_read(dev_priv, regs[id].ctl);
1241 if (!(val & LCPLL_PLL_ENABLE))
1244 val = intel_de_read(dev_priv, DPLL_CTRL1);
1245 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1247 /* avoid reading back stale values if HDMI mode is not enabled */
1248 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1249 hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1250 hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1255 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Read back DPLL0's CTRL1 field.  DPLL0 is expected to always be
 * enabled since it drives CDCLK, so a clear enable bit is a WARN.
 * NOTE(review): declarations, early-outs and the final return are
 * missing from this extract.
 */
1260 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1261 struct intel_shared_dpll *pll,
1262 struct intel_dpll_hw_state *hw_state)
1264 const struct skl_dpll_regs *regs = skl_dpll_regs;
1265 const enum intel_dpll_id id = pll->info->id;
1266 intel_wakeref_t wakeref;
1270 wakeref = intel_display_power_get_if_enabled(dev_priv,
1271 POWER_DOMAIN_DISPLAY_CORE);
1277 /* DPLL0 is always enabled since it drives CDCLK */
1278 val = intel_de_read(dev_priv, regs[id].ctl);
1279 if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1282 val = intel_de_read(dev_priv, DPLL_CTRL1);
1283 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1288 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* Running best-candidate state for the SKL WRPLL DCO divider search. */
1293 struct skl_wrpll_context {
1294 u64 min_deviation; /* current minimal deviation */
1295 u64 central_freq; /* chosen central freq */
1296 u64 dco_freq; /* chosen dco freq */
1297 unsigned int p; /* chosen divider */
/* Start a search with "no candidate yet" (worst possible deviation). */
1300 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1302 memset(ctx, 0, sizeof(*ctx));
1304 ctx->min_deviation = U64_MAX;
1307 /* DCO freq must be within +1%/-6% of the DCO central freq */
1308 #define SKL_DCO_MAX_PDEVIATION 100
1309 #define SKL_DCO_MAX_NDEVIATION 600
/*
 * Accept the candidate if its DCO deviation from the central frequency
 * beats the current best; positive deviation is budgeted more tightly
 * (SKL_DCO_MAX_PDEVIATION) than negative (SKL_DCO_MAX_NDEVIATION).
 * NOTE(review): parameter lines and the ctx->p assignments are missing
 * from this extract.
 */
1311 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1314 unsigned int divider)
1318 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1321 /* positive deviation */
1322 if (dco_freq >= central_freq) {
1323 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1324 deviation < ctx->min_deviation) {
1325 ctx->min_deviation = deviation;
1326 ctx->central_freq = central_freq;
1327 ctx->dco_freq = dco_freq;
1330 /* negative deviation */
1331 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1332 deviation < ctx->min_deviation) {
1333 ctx->min_deviation = deviation;
1334 ctx->central_freq = central_freq;
1335 ctx->dco_freq = dco_freq;
/*
 * Decompose the overall divider @p into the three hardware multipliers
 * P0 (pdiv), P1 (qdiv) and P2 (kdiv). Even dividers are halved first and
 * classified by their factors; a handful of odd dividers are special-cased.
 *
 * NOTE(review): the actual *p0/*p1/*p2 assignments inside each branch are
 * elided in this excerpt — only the branch conditions remain visible.
 */
1340 static void skl_wrpll_get_multipliers(unsigned int p,
1341 unsigned int *p0 /* out */,
1342 unsigned int *p1 /* out */,
1343 unsigned int *p2 /* out */)
/* Even dividers: classify by the factors of p/2. */
1347 unsigned int half = p / 2;
1349 if (half == 1 || half == 2 || half == 3 || half == 5) {
1353 } else if (half % 2 == 0) {
1357 } else if (half % 3 == 0) {
1361 } else if (half % 7 == 0) {
/* Odd dividers from the allowed set: 3, 5, 7, 9, 15, 21, 35. */
1366 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1370 } else if (p == 5 || p == 7) {
1374 } else if (p == 15) {
1378 } else if (p == 21) {
1382 } else if (p == 35) {
/* Register-level WRPLL parameters (fields elided in this excerpt). */
1389 struct skl_wrpll_params {
/*
 * Translate the chosen central frequency and P0/P1/P2 multipliers into
 * the encoded field values programmed into the PLL registers, and compute
 * the DCO integer/fraction from the AFE clock.
 *
 * NOTE(review): most switch-case bodies are elided in this excerpt.
 */
1399 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1403 u32 p0, u32 p1, u32 p2)
/* Encode the central frequency selector. */
1407 switch (central_freq) {
1409 params->central_freq = 0;
1412 params->central_freq = 1;
1415 params->central_freq = 3;
1432 WARN(1, "Incorrect PDiv\n");
1449 WARN(1, "Incorrect KDiv\n");
/* qdiv_mode = 0 means the Q divider is bypassed (ratio 1). */
1452 params->qdiv_ratio = p1;
1453 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1455 dco_freq = p0 * p1 * p2 * afe_clock;
1458 * Intermediate values are in Hz.
1459 * Divide by MHz to match bspec
1461 params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1462 params->dco_fraction =
1463 div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1464 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/*
 * Search for WRPLL dividers for the given pixel clock: try every allowed
 * divider against each DCO central frequency, keep the candidate with the
 * smallest in-spec deviation, then decompose it into P0/P1/P2 and populate
 * the register parameters. Even dividers are preferred over odd ones.
 *
 * NOTE(review): the return statements and some braces are elided in this
 * excerpt.
 */
1468 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1470 struct skl_wrpll_params *wrpll_params)
1472 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1473 u64 dco_central_freq[3] = { 8400000000ULL,
1476 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1477 24, 28, 30, 32, 36, 40, 42, 44,
1478 48, 52, 54, 56, 60, 64, 66, 68,
1479 70, 72, 76, 78, 80, 84, 88, 90,
1481 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1482 static const struct {
/* Even dividers first: they are preferred when a solution exists. */
1486 { even_dividers, ARRAY_SIZE(even_dividers) },
1487 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1489 struct skl_wrpll_context ctx;
1490 unsigned int dco, d, i;
1491 unsigned int p0, p1, p2;
1493 skl_wrpll_context_init(&ctx);
1495 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1496 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1497 for (i = 0; i < dividers[d].n_dividers; i++) {
1498 unsigned int p = dividers[d].list[i];
1499 u64 dco_freq = p * afe_clock;
1501 skl_wrpll_try_divider(&ctx,
1502 dco_central_freq[dco],
1506 * Skip the remaining dividers if we're sure to
1507 * have found the definitive divider, we can't
1508 * improve a 0 deviation.
1510 if (ctx.min_deviation == 0)
1511 goto skip_remaining_dividers;
1515 skip_remaining_dividers:
1517 * If a solution is found with an even divider, prefer
1520 if (d == 0 && ctx.p)
1525 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1530 * gcc incorrectly analyses that these can be used without being
1531 * initialized. To be fair, it's hard to guess.
1534 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1535 skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1536 ctx.central_freq, p0, p1, p2);
/*
 * Compute the SKL DPLL state (ctrl1/cfgcr1/cfgcr2) for an HDMI output:
 * run the WRPLL divider search for the port clock and encode the result
 * into crtc_state->dpll_hw_state.
 */
1541 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1543 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1544 u32 ctrl1, cfgcr1, cfgcr2;
1545 struct skl_wrpll_params wrpll_params = { 0, };
1548 * See comment in intel_dpll_hw_state to understand why we always use 0
1549 * as the DPLL id in this function.
1551 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1553 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
/* port_clock is in kHz; the search expects Hz. */
1555 if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1556 i915->dpll.ref_clks.nssc,
1560 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1561 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1562 wrpll_params.dco_integer;
1564 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1565 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1566 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1567 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1568 wrpll_params.central_freq;
/* Start from a clean state so stale fields can't leak through. */
1570 memset(&crtc_state->dpll_hw_state, 0,
1571 sizeof(crtc_state->dpll_hw_state));
1573 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1574 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1575 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
/*
 * Back-compute the port clock (in kHz) from a SKL WRPLL's programmed
 * cfgcr1/cfgcr2 state: decode P0/P1/P2, reconstruct the DCO frequency
 * and divide back down (the AFE clock is 5x the port clock).
 *
 * NOTE(review): the switch-case bodies assigning p0/p2 and some early
 * lines are elided in this excerpt.
 */
1579 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1580 const struct intel_shared_dpll *pll)
1582 const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
1583 int ref_clock = i915->dpll.ref_clks.nssc;
1584 u32 p0, p1, p2, dco_freq;
1586 p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1587 p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
/* Q divider only contributes when QDIV_MODE is set; otherwise it's 1. */
1589 if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1590 p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1596 case DPLL_CFGCR2_PDIV_1:
1599 case DPLL_CFGCR2_PDIV_2:
1602 case DPLL_CFGCR2_PDIV_3:
1605 case DPLL_CFGCR2_PDIV_7:
1611 case DPLL_CFGCR2_KDIV_5:
1614 case DPLL_CFGCR2_KDIV_2:
1617 case DPLL_CFGCR2_KDIV_3:
1620 case DPLL_CFGCR2_KDIV_1:
/* DCO = integer part * ref + fraction (15-bit, scaled by 0x8000). */
1625 dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1628 dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1631 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1634 return dco_freq / (p0 * p1 * p2 * 5);
/*
 * Compute the SKL DPLL ctrl1 state for a DP output: select the fixed
 * link-rate encoding matching half the port clock (port_clock is the
 * symbol rate; the link rates are specified per lane pair).
 */
1638 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1643 * See comment in intel_dpll_hw_state to understand why we always use 0
1644 * as the DPLL id in this function.
1646 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1647 switch (crtc_state->port_clock / 2) {
1649 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1652 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1655 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1659 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1662 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1665 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
/* Only ctrl1 is meaningful for DP; clear the HDMI cfgcr fields. */
1669 memset(&crtc_state->dpll_hw_state, 0,
1670 sizeof(crtc_state->dpll_hw_state));
1672 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
/*
 * Back-compute the port clock (in kHz) from a SKL LCPLL's programmed DP
 * link rate in ctrl1. Returns twice the per-lane link clock, mirroring
 * the /2 used when programming the state.
 */
1677 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1678 const struct intel_shared_dpll *pll)
/* ctrl1 is stored pre-shifted per PLL, so id 0 gives the field offsets. */
1682 switch ((pll->state.hw_state.ctrl1 &
1683 DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1684 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1685 case DPLL_CTRL1_LINK_RATE_810:
1688 case DPLL_CTRL1_LINK_RATE_1080:
1689 link_clock = 108000;
1691 case DPLL_CTRL1_LINK_RATE_1350:
1692 link_clock = 135000;
1694 case DPLL_CTRL1_LINK_RATE_1620:
1695 link_clock = 162000;
1697 case DPLL_CTRL1_LINK_RATE_2160:
1698 link_clock = 216000;
1700 case DPLL_CTRL1_LINK_RATE_2700:
1701 link_clock = 270000;
1704 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1708 return link_clock * 2;
/*
 * Compute the DPLL state for the CRTC's output type, then reserve a
 * matching shared DPLL: eDP always uses DPLL0 (shared with CDCLK), other
 * outputs pick from DPLL1-3.
 *
 * NOTE(review): the failure-path returns are elided in this excerpt.
 */
1711 static bool skl_get_dpll(struct intel_atomic_state *state,
1712 struct intel_crtc *crtc,
1713 struct intel_encoder *encoder)
1715 struct intel_crtc_state *crtc_state =
1716 intel_atomic_get_new_crtc_state(state, crtc);
1717 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1718 struct intel_shared_dpll *pll;
1721 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1722 bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1724 drm_dbg_kms(&i915->drm,
1725 "Could not get HDMI pll dividers.\n");
1728 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1729 bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1731 drm_dbg_kms(&i915->drm,
1732 "Could not set DP dpll HW state.\n");
/* eDP is tied to DPLL0; everything else competes for DPLL1-3. */
1739 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1740 pll = intel_find_shared_dpll(state, crtc,
1741 &crtc_state->dpll_hw_state,
1742 BIT(DPLL_ID_SKL_DPLL0));
1744 pll = intel_find_shared_dpll(state, crtc,
1745 &crtc_state->dpll_hw_state,
1746 BIT(DPLL_ID_SKL_DPLL3) |
1747 BIT(DPLL_ID_SKL_DPLL2) |
1748 BIT(DPLL_ID_SKL_DPLL1));
/* Track this CRTC as a user of the PLL in the atomic state. */
1752 intel_reference_shared_dpll(state, crtc,
1753 pll, &crtc_state->dpll_hw_state);
1755 crtc_state->shared_dpll = pll;
/*
 * Dispatch frequency readout based on the programmed mode: HDMI outputs
 * use the WRPLL path, everything else the fixed DP link-rate (LCPLL) path.
 */
1760 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1761 const struct intel_shared_dpll *pll)
1764 * ctrl1 register is already shifted for each pll, just use 0 to get
1765 * the internal shift for each field
1767 if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1768 return skl_ddi_wrpll_get_freq(i915, pll);
1770 return skl_ddi_lcpll_get_freq(i915, pll);
/* Cache the non-SSC DPLL reference clock from the CDCLK hardware state. */
1773 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1776 i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
/* Debug dump of the SKL DPLL register state. */
1779 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1780 const struct intel_dpll_hw_state *hw_state)
1782 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1783 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* Ops for SKL DPLL1-3 (the general-purpose PLLs). */
1789 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1790 .enable = skl_ddi_pll_enable,
1791 .disable = skl_ddi_pll_disable,
1792 .get_hw_state = skl_ddi_pll_get_hw_state,
1793 .get_freq = skl_ddi_pll_get_freq,
/* Ops for DPLL0, which needs special handling since it drives CDCLK. */
1796 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1797 .enable = skl_ddi_dpll0_enable,
1798 .disable = skl_ddi_dpll0_disable,
1799 .get_hw_state = skl_ddi_dpll0_get_hw_state,
1800 .get_freq = skl_ddi_pll_get_freq,
/* PLL inventory for SKL; DPLL0 is always-on (CDCLK source). */
1803 static const struct dpll_info skl_plls[] = {
1804 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1805 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1806 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1807 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
/* Platform hook table wiring the SKL PLLs into the shared-DPLL core. */
1811 static const struct intel_dpll_mgr skl_pll_mgr = {
1812 .dpll_info = skl_plls,
1813 .get_dplls = skl_get_dpll,
1814 .put_dplls = intel_put_dpll,
1815 .update_ref_clks = skl_update_dpll_ref_clks,
1816 .dump_hw_state = skl_dump_hw_state,
/*
 * Program and enable a BXT/GLK port PLL following the hardware sequence:
 * select the reference, (GLK only) power up the PLL, program dividers and
 * loop-filter coefficients from the cached hw_state, trigger recalibration,
 * enable the PLL, wait for lock, then program lane staggering.
 */
1819 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1820 struct intel_shared_dpll *pll)
1823 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1825 enum dpio_channel ch;
1827 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1829 /* Non-SSC reference */
1830 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1831 temp |= PORT_PLL_REF_SEL;
1832 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
/* GLK has a separate PLL power control that must be enabled first. */
1834 if (IS_GEMINILAKE(dev_priv)) {
1835 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1836 temp |= PORT_PLL_POWER_ENABLE;
1837 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1839 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1840 PORT_PLL_POWER_STATE), 200))
1841 drm_err(&dev_priv->drm,
1842 "Power state not set for PLL:%d\n", port);
1845 /* Disable 10 bit clock */
1846 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1847 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1848 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1/P2 dividers */
1851 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1852 temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1853 temp |= pll->state.hw_state.ebb0;
1854 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
1856 /* Write M2 integer */
1857 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1858 temp &= ~PORT_PLL_M2_MASK;
1859 temp |= pll->state.hw_state.pll0;
1860 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N divider */
1863 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1864 temp &= ~PORT_PLL_N_MASK;
1865 temp |= pll->state.hw_state.pll1;
1866 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
1868 /* Write M2 fraction */
1869 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1870 temp &= ~PORT_PLL_M2_FRAC_MASK;
1871 temp |= pll->state.hw_state.pll2;
1872 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
1874 /* Write M2 fraction enable */
1875 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1876 temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1877 temp |= pll->state.hw_state.pll3;
1878 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
/* Write loop filter coefficients and gain control */
1881 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1882 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1883 temp &= ~PORT_PLL_INT_COEFF_MASK;
1884 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1885 temp |= pll->state.hw_state.pll6;
1886 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1888 /* Write calibration val */
1889 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1890 temp &= ~PORT_PLL_TARGET_CNT_MASK;
1891 temp |= pll->state.hw_state.pll8;
1892 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
/* Write lock threshold */
1894 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1895 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1896 temp |= pll->state.hw_state.pll9;
1897 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
/* Write DCO amplitude override and value */
1899 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1900 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1901 temp &= ~PORT_PLL_DCO_AMP_MASK;
1902 temp |= pll->state.hw_state.pll10;
1903 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1905 /* Recalibrate with new settings */
1906 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1907 temp |= PORT_PLL_RECALIBRATE;
1908 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1909 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1910 temp |= pll->state.hw_state.ebb4;
1911 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable the PLL and wait for lock (posting read flushes the write). */
1914 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1915 temp |= PORT_PLL_ENABLE;
1916 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1917 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1919 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1921 drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
/* GLK workaround: read-modify-write DCC delay range on the TX lanes. */
1923 if (IS_GEMINILAKE(dev_priv)) {
1924 temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
1925 temp |= DCC_DELAY_RANGE_2;
1926 intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1930 * While we write to the group register to program all lanes at once we
1931 * can read only lane registers and we pick lanes 0/1 for that.
1933 temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
1934 temp &= ~LANE_STAGGER_MASK;
1935 temp &= ~LANESTAGGER_STRAP_OVRD;
1936 temp |= pll->state.hw_state.pcsdw12;
1937 intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
/*
 * Disable a BXT/GLK port PLL; on GLK additionally power it down and wait
 * for the power state to clear.
 */
1940 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1941 struct intel_shared_dpll *pll)
1943 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1946 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1947 temp &= ~PORT_PLL_ENABLE;
1948 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1949 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
/* GLK only: also remove PLL power and confirm it went down. */
1951 if (IS_GEMINILAKE(dev_priv)) {
1952 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1953 temp &= ~PORT_PLL_POWER_ENABLE;
1954 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1956 if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1957 PORT_PLL_POWER_STATE), 200))
1958 drm_err(&dev_priv->drm,
1959 "Power state not reset for PLL:%d\n", port);
/*
 * Read back the current hardware state of a BXT/GLK port PLL into
 * @hw_state, masking each register to the fields the driver programs.
 *
 * NOTE(review): local declarations, early-return paths and the final
 * return are elided in this excerpt.
 */
1963 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1964 struct intel_shared_dpll *pll,
1965 struct intel_dpll_hw_state *hw_state)
1967 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1968 intel_wakeref_t wakeref;
1970 enum dpio_channel ch;
1974 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Don't touch the hardware if the display core power domain is off. */
1976 wakeref = intel_display_power_get_if_enabled(dev_priv,
1977 POWER_DOMAIN_DISPLAY_CORE);
1983 val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1984 if (!(val & PORT_PLL_ENABLE))
1987 hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1988 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1990 hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1991 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1993 hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1994 hw_state->pll0 &= PORT_PLL_M2_MASK;
1996 hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1997 hw_state->pll1 &= PORT_PLL_N_MASK;
1999 hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
2000 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2002 hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
2003 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2005 hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
2006 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2007 PORT_PLL_INT_COEFF_MASK |
2008 PORT_PLL_GAIN_CTL_MASK;
2010 hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
2011 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2013 hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
2014 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2016 hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
2017 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2018 PORT_PLL_DCO_AMP_MASK;
2021 * While we write to the group register to program all lanes at once we
2022 * can read only lane registers. We configure all lanes the same way, so
2023 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2025 hw_state->pcsdw12 = intel_de_read(dev_priv,
2026 BXT_PORT_PCS_DW12_LN01(phy, ch));
2027 if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2028 drm_dbg(&dev_priv->drm,
2029 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2031 intel_de_read(dev_priv,
2032 BXT_PORT_PCS_DW12_LN23(phy, ch)));
2033 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2038 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2043 /* bxt clock parameters */
/* Divider set for one BXT PLL configuration (fields elided in excerpt). */
2044 struct bxt_clk_div {
2056 /* pre-calculated values for DP linkrates */
/* Columns appear to be: clock, p1, p2, m2_int, m2_frac, m2_frac_en, n. */
2057 static const struct bxt_clk_div bxt_dp_clk_val[] = {
2058 {162000, 4, 2, 32, 1677722, 1, 1},
2059 {270000, 4, 1, 27, 0, 0, 1},
2060 {540000, 2, 1, 27, 0, 0, 1},
2061 {216000, 3, 2, 32, 1677722, 1, 1},
2062 {243000, 4, 1, 24, 1258291, 1, 1},
2063 {324000, 4, 1, 32, 1677722, 1, 1},
2064 {432000, 3, 1, 32, 1677722, 1, 1}
/*
 * Compute BXT PLL dividers for an HDMI output by running the generic
 * best-divider search and splitting the resulting M2 into its integer
 * and 22-bit fractional parts.
 */
2068 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2069 struct bxt_clk_div *clk_div)
2071 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2072 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2073 struct dpll best_clock;
2075 /* Calculate HDMI div */
2077 * FIXME: tie the following calculation into
2078 * i9xx_crtc_compute_clock
2080 if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2081 drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2082 crtc_state->port_clock,
2083 pipe_name(crtc->pipe));
2087 clk_div->p1 = best_clock.p1;
2088 clk_div->p2 = best_clock.p2;
/* BXT PLLs are expected to always use M1 = 2. */
2089 drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2090 clk_div->n = best_clock.n;
/* Split M2 into integer (above bit 22) and 22-bit fraction. */
2091 clk_div->m2_int = best_clock.m2 >> 22;
2092 clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2093 clk_div->m2_frac_en = clk_div->m2_frac != 0;
2095 clk_div->vco = best_clock.vco;
/*
 * Select pre-calculated BXT dividers for a DP link rate; falls back to
 * the first table entry if the exact rate is not found, then derives the
 * VCO from the chosen P1/P2.
 */
2100 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2101 struct bxt_clk_div *clk_div)
2103 int clock = crtc_state->port_clock;
2106 *clk_div = bxt_dp_clk_val[0];
2107 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2108 if (bxt_dp_clk_val[i].clock == clock) {
2109 *clk_div = bxt_dp_clk_val[i];
/* VCO = link clock * 10 / 2 * P1 * P2 (kHz). */
2114 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
/*
 * Translate the computed dividers into the BXT PLL register state:
 * pick loop-filter coefficients and target count based on the VCO,
 * choose lane staggering based on the link clock, then fill in
 * crtc_state->dpll_hw_state.
 *
 * NOTE(review): the coefficient assignments in each VCO branch and the
 * lanestagger assignments are elided in this excerpt.
 */
2117 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2118 const struct bxt_clk_div *clk_div)
2120 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2121 struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2122 int clock = crtc_state->port_clock;
2123 int vco = clk_div->vco;
2124 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2127 memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
/* Loop filter tuning depends on which VCO band we landed in. */
2129 if (vco >= 6200000 && vco <= 6700000) {
2134 } else if ((vco > 5400000 && vco < 6200000) ||
2135 (vco >= 4800000 && vco < 5400000)) {
2140 } else if (vco == 5400000) {
2146 drm_err(&i915->drm, "Invalid VCO\n");
/* Lane staggering selection by link clock range. */
2152 else if (clock > 135000)
2154 else if (clock > 67000)
2156 else if (clock > 33000)
2161 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2162 dpll_hw_state->pll0 = clk_div->m2_int;
2163 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2164 dpll_hw_state->pll2 = clk_div->m2_frac;
2166 if (clk_div->m2_frac_en)
2167 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2169 dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
2170 dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
2172 dpll_hw_state->pll8 = targ_cnt;
2174 dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
2176 dpll_hw_state->pll10 =
2177 PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
2178 | PORT_PLL_DCO_AMP_OVR_EN_H;
2180 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2182 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
/* DP entry point: look up pre-calculated dividers, then encode hw state. */
2188 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2190 struct bxt_clk_div clk_div = {};
2192 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2194 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/* HDMI entry point: compute dividers by search, then encode hw state. */
2198 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2200 struct bxt_clk_div clk_div = {};
2202 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2204 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * Back-compute the BXT port clock by decoding M2 (integer + optional
 * 22-bit fraction), N, P1 and P2 from the cached register state and
 * reusing the CHV dpll math (same PLL topology).
 */
2207 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2208 const struct intel_shared_dpll *pll)
2210 const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
/* Reassemble M2: integer part above bit 22, fraction only if enabled. */
2214 clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
2215 if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2216 clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
2217 clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
2218 clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
2219 clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
2221 return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
/*
 * Compute the PLL state for the CRTC's output type and claim the PLL
 * dedicated to the encoder's port (BXT has a fixed 1:1 port->PLL map,
 * so no search is needed).
 *
 * NOTE(review): the failure-path returns are elided in this excerpt.
 */
2224 static bool bxt_get_dpll(struct intel_atomic_state *state,
2225 struct intel_crtc *crtc,
2226 struct intel_encoder *encoder)
2228 struct intel_crtc_state *crtc_state =
2229 intel_atomic_get_new_crtc_state(state, crtc);
2230 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2231 struct intel_shared_dpll *pll;
2232 enum intel_dpll_id id;
2234 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
2235 !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
2238 if (intel_crtc_has_dp_encoder(crtc_state) &&
2239 !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
2242 /* 1:1 mapping between ports and PLLs */
2243 id = (enum intel_dpll_id) encoder->port;
2244 pll = intel_get_shared_dpll_by_id(dev_priv, id);
2246 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2247 crtc->base.base.id, crtc->base.name, pll->info->name);
/* Track this CRTC as a user of the PLL in the atomic state. */
2249 intel_reference_shared_dpll(state, crtc,
2250 pll, &crtc_state->dpll_hw_state);
2252 crtc_state->shared_dpll = pll;
/* BXT uses a fixed 100 MHz reference for both SSC and non-SSC modes. */
2257 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2259 i915->dpll.ref_clks.ssc = 100000;
2260 i915->dpll.ref_clks.nssc = 100000;
2261 /* DSI non-SSC ref 19.2MHz */
/* Debug dump of the BXT port PLL register state. */
2264 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2265 const struct intel_dpll_hw_state *hw_state)
2267 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2268 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2269 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
/* Ops shared by all BXT port PLLs. */
2283 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2284 .enable = bxt_ddi_pll_enable,
2285 .disable = bxt_ddi_pll_disable,
2286 .get_hw_state = bxt_ddi_pll_get_hw_state,
2287 .get_freq = bxt_ddi_pll_get_freq,
/* One PLL per port (A-C); IDs reuse the SKL enum values. */
2290 static const struct dpll_info bxt_plls[] = {
2291 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2292 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2293 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
/* Platform hook table wiring the BXT PLLs into the shared-DPLL core. */
2297 static const struct intel_dpll_mgr bxt_pll_mgr = {
2298 .dpll_info = bxt_plls,
2299 .get_dplls = bxt_get_dpll,
2300 .put_dplls = intel_put_dpll,
2301 .update_ref_clks = bxt_update_dpll_ref_clks,
2302 .dump_hw_state = bxt_dump_hw_state,
/*
 * Enable a CNL DPLL following the numbered bspec sequence: power up,
 * program CFGCR0 (and CFGCR1 for HDMI), enable the PLL and wait for lock.
 * DVFS steps (5 and 8) are handled by the cdclk code, not here.
 */
2305 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2306 struct intel_shared_dpll *pll)
2308 const enum intel_dpll_id id = pll->info->id;
2311 /* 1. Enable DPLL power in DPLL_ENABLE. */
2312 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2313 val |= PLL_POWER_ENABLE;
2314 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2316 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2317 if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2318 PLL_POWER_STATE, 5))
2319 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
2322 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2323 * select DP mode, and set DP link rate.
2325 val = pll->state.hw_state.cfgcr0;
2326 intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
2328 /* 4. Read back to ensure writes completed */
2329 intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
2331 /* 3. Configure DPLL_CFGCR0 */
2332 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
2333 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2334 val = pll->state.hw_state.cfgcr1;
2335 intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
2336 /* 4. Read back to ensure writes completed */
2337 intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
2341 * 5. If the frequency will result in a change to the voltage
2342 * requirement, follow the Display Voltage Frequency Switching
2343 * Sequence Before Frequency Change
2345 * Note: DVFS is actually handled via the cdclk code paths,
2346 * hence we do nothing here.
2349 /* 6. Enable DPLL in DPLL_ENABLE. */
2350 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2352 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2354 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2355 if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2356 drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
2359 * 8. If the frequency will result in a change to the voltage
2360 * requirement, follow the Display Voltage Frequency Switching
2361 * Sequence After Frequency Change
2363 * Note: DVFS is actually handled via the cdclk code paths,
2364 * hence we do nothing here.
2368 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2369 * Done at intel_ddi_clk_select
/*
 * Disable a CNL DPLL following the numbered bspec sequence: disable the
 * PLL, wait for unlock, then remove PLL power. Clock gating (step 1) and
 * DVFS (steps 2 and 5) are handled elsewhere.
 */
2373 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2374 struct intel_shared_dpll *pll)
2376 const enum intel_dpll_id id = pll->info->id;
2380 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2381 * Done at intel_ddi_post_disable
2385 * 2. If the frequency will result in a change to the voltage
2386 * requirement, follow the Display Voltage Frequency Switching
2387 * Sequence Before Frequency Change
2389 * Note: DVFS is actually handled via the cdclk code paths,
2390 * hence we do nothing here.
2393 /* 3. Disable DPLL through DPLL_ENABLE. */
2394 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2396 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2398 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2399 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2400 drm_err(&dev_priv->drm, "PLL %d locked\n", id);
2403 * 5. If the frequency will result in a change to the voltage
2404 * requirement, follow the Display Voltage Frequency Switching
2405 * Sequence After Frequency Change
2407 * Note: DVFS is actually handled via the cdclk code paths,
2408 * hence we do nothing here.
2411 /* 6. Disable DPLL power in DPLL_ENABLE. */
2412 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2413 val &= ~PLL_POWER_ENABLE;
2414 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2416 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2417 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2418 PLL_POWER_STATE, 5))
2419 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
/*
 * Read back the current hardware state of a CNL DPLL (cfgcr0, and cfgcr1
 * only when the PLL is in HDMI mode).
 *
 * NOTE(review): local declarations, early-return paths and the final
 * return are elided in this excerpt.
 */
2422 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2423 struct intel_shared_dpll *pll,
2424 struct intel_dpll_hw_state *hw_state)
2426 const enum intel_dpll_id id = pll->info->id;
2427 intel_wakeref_t wakeref;
/* Don't touch the hardware if the display core power domain is off. */
2431 wakeref = intel_display_power_get_if_enabled(dev_priv,
2432 POWER_DOMAIN_DISPLAY_CORE);
2438 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2439 if (!(val & PLL_ENABLE))
2442 val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
2443 hw_state->cfgcr0 = val;
2445 /* avoid reading back stale values if HDMI mode is not enabled */
2446 if (val & DPLL_CFGCR0_HDMI_MODE) {
2447 hw_state->cfgcr1 = intel_de_read(dev_priv,
2448 CNL_DPLL_CFGCR1(id));
2453 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Decompose the overall divider @bestdiv into CNL Pdiv/Qdiv/Kdiv:
 * even dividers are classified by divisibility, odd dividers (3, 5, 7,
 * 9, 15, 21) are special-cased.
 *
 * NOTE(review): several assignments inside the branches are elided in
 * this excerpt — only the branch structure remains visible.
 */
2458 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2459 int *qdiv, int *kdiv)
/* Even dividers: classify by divisibility, most specific cases below. */
2462 if (bestdiv % 2 == 0) {
2467 } else if (bestdiv % 4 == 0) {
2469 *qdiv = bestdiv / 4;
2471 } else if (bestdiv % 6 == 0) {
2473 *qdiv = bestdiv / 6;
2475 } else if (bestdiv % 5 == 0) {
2477 *qdiv = bestdiv / 10;
2479 } else if (bestdiv % 14 == 0) {
2481 *qdiv = bestdiv / 14;
/* Odd dividers: small primes map directly, 9/15/21 use pdiv = n/3. */
2485 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2489 } else { /* 9, 15, 21 */
2490 *pdiv = bestdiv / 3;
/*
 * Encode pdiv/qdiv/kdiv and the DCO frequency into the CNL WRPLL register
 * parameters. The DCO ratio is computed in 15.15 fixed point against the
 * reference clock.
 *
 * NOTE(review): the switch-case bodies encoding kdiv/pdiv are elided in
 * this excerpt.
 */
2497 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2498 u32 dco_freq, u32 ref_freq,
2499 int pdiv, int qdiv, int kdiv)
2514 WARN(1, "Incorrect KDiv\n");
2531 WARN(1, "Incorrect PDiv\n");
/* A Q divider other than 1 is only legal when Kdiv is 2. */
2534 WARN_ON(kdiv != 2 && qdiv != 1);
2536 params->qdiv_ratio = qdiv;
2537 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
/* 15-bit fixed-point split of dco_freq / ref_freq. */
2539 dco = div_u64((u64)dco_freq << 15, ref_freq);
2541 params->dco_integer = dco >> 15;
2542 params->dco_fraction = dco & 0x7fff;
/*
 * Find CNL WRPLL dividers for the port clock: pick the divider whose DCO
 * lands inside [dco_min, dco_max] closest to the band midpoint, then
 * decompose it and populate the register parameters.
 *
 * NOTE(review): the "no divider found" early return and the final lines
 * are elided in this excerpt.
 */
2546 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2547 struct skl_wrpll_params *wrpll_params,
2550 u32 afe_clock = crtc_state->port_clock * 5;
2551 u32 dco_min = 7998000;
2552 u32 dco_max = 10000000;
2553 u32 dco_mid = (dco_min + dco_max) / 2;
2554 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2555 18, 20, 24, 28, 30, 32, 36, 40,
2556 42, 44, 48, 50, 52, 54, 56, 60,
2557 64, 66, 68, 70, 72, 76, 78, 80,
2558 84, 88, 90, 92, 96, 98, 100, 102,
2559 3, 5, 7, 9, 15, 21 };
2560 u32 dco, best_dco = 0, dco_centrality = 0;
2561 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2562 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2564 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2565 dco = afe_clock * dividers[d];
/* Keep the in-range DCO closest to the middle of the band. */
2567 if ((dco <= dco_max) && (dco >= dco_min)) {
2568 dco_centrality = abs(dco - dco_mid);
2570 if (dco_centrality < best_dco_centrality) {
2571 best_dco_centrality = dco_centrality;
2572 best_div = dividers[d];
2581 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2582 cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
/* Thin wrapper supplying the platform's non-SSC reference clock. */
2589 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2590 struct skl_wrpll_params *wrpll_params)
2592 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2594 return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
2595 i915->dpll.ref_clks.nssc);
/*
 * Compute the CNL DPLL state (cfgcr0/cfgcr1) for an HDMI output: run the
 * WRPLL search and encode the result into crtc_state->dpll_hw_state.
 */
2598 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2601 struct skl_wrpll_params wrpll_params = { 0, };
2603 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2605 if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2608 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2609 wrpll_params.dco_integer;
2611 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2612 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2613 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2614 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2615 DPLL_CFGCR1_CENTRAL_FREQ;
/* Start from a clean state so stale fields can't leak through. */
2617 memset(&crtc_state->dpll_hw_state, 0,
2618 sizeof(crtc_state->dpll_hw_state));
2620 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2621 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2626 * Display WA #22010492432: tgl
2627 * Program half of the nominal DCO divider fraction value.
/* The workaround applies only on TGL with a 38.4 MHz reference clock. */
2630 tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2632 return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400;
/*
 * Back-compute the port clock (in kHz) from a CNL-style WRPLL's
 * cfgcr0/cfgcr1 state for a given reference clock: decode P0/P1/P2,
 * reconstruct the DCO (applying the TGL fraction workaround where
 * needed) and divide back down (AFE clock is 5x the port clock).
 *
 * NOTE(review): the switch-case bodies assigning p0/p2 and some early
 * lines are elided in this excerpt.
 */
2635 static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
2636 const struct intel_shared_dpll *pll,
2639 const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
2641 u32 p0, p1, p2, dco_freq;
2643 p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2644 p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
/* Q divider only contributes when QDIV_MODE is set; otherwise it's 1. */
2646 if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2647 p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2648 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2654 case DPLL_CFGCR1_PDIV_2:
2657 case DPLL_CFGCR1_PDIV_3:
2660 case DPLL_CFGCR1_PDIV_5:
2663 case DPLL_CFGCR1_PDIV_7:
2669 case DPLL_CFGCR1_KDIV_1:
2672 case DPLL_CFGCR1_KDIV_2:
2675 case DPLL_CFGCR1_KDIV_3:
2680 dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2683 dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2684 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
/* Undo Display WA #22010492432 (register holds half the fraction). */
2686 if (tgl_combo_pll_div_frac_wa_needed(dev_priv))
2689 dco_freq += (dco_fraction * ref_clock) / 0x8000;
2691 if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
2694 return dco_freq / (p0 * p1 * p2 * 5);
/*
 * CNL WRPLL (HDMI-mode PLL) frequency readout: forwards to the shared
 * __cnl_ddi_wrpll_get_freq() helper using the platform's non-SSC
 * reference clock.
 */
2697 static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
2698 const struct intel_shared_dpll *pll)
2700 return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
2704 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2708 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2710 switch (crtc_state->port_clock / 2) {
2712 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2715 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2718 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2722 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2725 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2728 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2731 /* Some SKUs may require elevated I/O voltage to support this */
2732 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2735 /* Some SKUs may require elevated I/O voltage to support this */
2736 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2740 memset(&crtc_state->dpll_hw_state, 0,
2741 sizeof(crtc_state->dpll_hw_state));
2743 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2748 static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
2749 const struct intel_shared_dpll *pll)
2753 switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
2754 case DPLL_CFGCR0_LINK_RATE_810:
2757 case DPLL_CFGCR0_LINK_RATE_1080:
2758 link_clock = 108000;
2760 case DPLL_CFGCR0_LINK_RATE_1350:
2761 link_clock = 135000;
2763 case DPLL_CFGCR0_LINK_RATE_1620:
2764 link_clock = 162000;
2766 case DPLL_CFGCR0_LINK_RATE_2160:
2767 link_clock = 216000;
2769 case DPLL_CFGCR0_LINK_RATE_2700:
2770 link_clock = 270000;
2772 case DPLL_CFGCR0_LINK_RATE_3240:
2773 link_clock = 324000;
2775 case DPLL_CFGCR0_LINK_RATE_4050:
2776 link_clock = 405000;
2779 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
2783 return link_clock * 2;
2786 static bool cnl_get_dpll(struct intel_atomic_state *state,
2787 struct intel_crtc *crtc,
2788 struct intel_encoder *encoder)
2790 struct intel_crtc_state *crtc_state =
2791 intel_atomic_get_new_crtc_state(state, crtc);
2792 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2793 struct intel_shared_dpll *pll;
2796 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2797 bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2799 drm_dbg_kms(&i915->drm,
2800 "Could not get HDMI pll dividers.\n");
2803 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
2804 bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2806 drm_dbg_kms(&i915->drm,
2807 "Could not set DP dpll HW state.\n");
2811 drm_dbg_kms(&i915->drm,
2812 "Skip DPLL setup for output_types 0x%x\n",
2813 crtc_state->output_types);
2817 pll = intel_find_shared_dpll(state, crtc,
2818 &crtc_state->dpll_hw_state,
2819 BIT(DPLL_ID_SKL_DPLL2) |
2820 BIT(DPLL_ID_SKL_DPLL1) |
2821 BIT(DPLL_ID_SKL_DPLL0));
2823 drm_dbg_kms(&i915->drm, "No PLL selected\n");
2827 intel_reference_shared_dpll(state, crtc,
2828 pll, &crtc_state->dpll_hw_state);
2830 crtc_state->shared_dpll = pll;
/*
 * Frequency readout dispatcher for CNL DDI PLLs: if cfgcr0 has
 * DPLL_CFGCR0_HDMI_MODE set the WRPLL path is used, otherwise the DP
 * LCPLL path.
 */
2835 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2836 const struct intel_shared_dpll *pll)
2838 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2839 return cnl_ddi_wrpll_get_freq(i915, pll);
2841 return cnl_ddi_lcpll_get_freq(i915, pll);
/*
 * Record the DPLL reference clock for CNL: the non-SSC reference is the
 * current CDCLK reference frequency (there is no SSC reference on this
 * platform, per the comment below).
 */
2844 static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
2846 /* No SSC reference */
2847 i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
/*
 * Debug helper: log the CNL PLL hw state (cfgcr0/cfgcr1) via
 * drm_dbg_kms().
 * NOTE(review): the value-argument line of this call is elided in this
 * sampled listing.
 */
2850 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2851 const struct intel_dpll_hw_state *hw_state)
2853 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
2854 "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
/* Operations vtable shared by all CNL DDI PLLs. */
2859 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2860 .enable = cnl_ddi_pll_enable,
2861 .disable = cnl_ddi_pll_disable,
2862 .get_hw_state = cnl_ddi_pll_get_hw_state,
2863 .get_freq = cnl_ddi_pll_get_freq,
/* CNL exposes three shareable DPLLs, all driven by the same vtable. */
2866 static const struct dpll_info cnl_plls[] = {
2867 { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2868 { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2869 { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
/* Platform glue: DPLL table plus get/put/ref-clk/dump hooks for CNL. */
2873 static const struct intel_dpll_mgr cnl_pll_mgr = {
2874 .dpll_info = cnl_plls,
2875 .get_dplls = cnl_get_dpll,
2876 .put_dplls = intel_put_dpll,
2877 .update_ref_clks = cnl_update_dpll_ref_clks,
2878 .dump_hw_state = cnl_dump_hw_state,
2881 struct icl_combo_pll_params {
2883 struct skl_wrpll_params wrpll;
2887 * These values are already adjusted: they're the bits we write to the
2888 * registers, not the logical values.
2890 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2892 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2893 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2895 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2896 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2898 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2899 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2901 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2902 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2904 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2905 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2907 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2908 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2910 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2911 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2913 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2914 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2918 /* Also used for 38.4 MHz values. */
2919 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2921 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2922 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2924 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2925 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2927 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2928 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2930 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2931 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2933 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2934 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2936 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2937 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2939 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2940 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2942 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2943 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2946 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2947 .dco_integer = 0x151, .dco_fraction = 0x4000,
2948 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2951 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2952 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2953 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2956 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2957 .dco_integer = 0x54, .dco_fraction = 0x3000,
2958 /* the following params are unused */
2959 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2962 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2963 .dco_integer = 0x43, .dco_fraction = 0x4000,
2964 /* the following params are unused */
2967 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2968 struct skl_wrpll_params *pll_params)
2970 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2971 const struct icl_combo_pll_params *params =
2972 dev_priv->dpll.ref_clks.nssc == 24000 ?
2973 icl_dp_combo_pll_24MHz_values :
2974 icl_dp_combo_pll_19_2MHz_values;
2975 int clock = crtc_state->port_clock;
2978 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2979 if (clock == params[i].clock) {
2980 *pll_params = params[i].wrpll;
2985 MISSING_CASE(clock);
2989 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2990 struct skl_wrpll_params *pll_params)
2992 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2994 if (INTEL_GEN(dev_priv) >= 12) {
2995 switch (dev_priv->dpll.ref_clks.nssc) {
2997 MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3001 *pll_params = tgl_tbt_pll_19_2MHz_values;
3004 *pll_params = tgl_tbt_pll_24MHz_values;
3008 switch (dev_priv->dpll.ref_clks.nssc) {
3010 MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3014 *pll_params = icl_tbt_pll_19_2MHz_values;
3017 *pll_params = icl_tbt_pll_24MHz_values;
/*
 * TBT PLL "frequency" readout is deliberately a WARN: the TBT PLL
 * outputs several frequencies simultaneously and the actual link rate
 * is selected at the DDI clock mux, not at the PLL.
 */
3025 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
3026 const struct intel_shared_dpll *pll)
3029 * The PLL outputs multiple frequencies at the same time, selection is
3030 * made at DDI clock mux level.
3032 drm_WARN_ON(&i915->drm, 1);
/*
 * Effective WRPLL reference clock for ICL+. A 38.4 MHz reference is
 * treated as 19.2 MHz because the DPLL divides it by two internally.
 * NOTE(review): the 19200 assignment and the final return statement are
 * elided in this sampled listing.
 */
3037 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3039 int ref_clock = i915->dpll.ref_clks.nssc;
3042 * For ICL+, the spec states: if reference frequency is 38.4,
3043 * use 19.2 because the DPLL automatically divides that by 2.
3045 if (ref_clock == 38400)
/*
 * ICL WRPLL parameter calculation: reuses the CNL WRPLL algorithm but
 * with the ICL-adjusted reference clock (see icl_wrpll_ref_clock()).
 * NOTE(review): the return-type line is elided in this sampled listing.
 */
3052 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
3053 struct skl_wrpll_params *wrpll_params)
3055 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3057 return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
3058 icl_wrpll_ref_clock(i915));
/*
 * Combo-PHY PLL frequency readout for ICL+: CNL WRPLL math applied with
 * the ICL-adjusted reference clock.
 */
3061 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
3062 const struct intel_shared_dpll *pll)
3064 return __cnl_ddi_wrpll_get_freq(i915, pll,
3065 icl_wrpll_ref_clock(i915));
/*
 * Translate computed WRPLL parameters into cfgcr0/cfgcr1 register values
 * for ICL+ combo PLLs, starting from a zeroed pll_state.
 */
3068 static void icl_calc_dpll_state(struct drm_i915_private *i915,
3069 const struct skl_wrpll_params *pll_params,
3070 struct intel_dpll_hw_state *pll_state)
3072 u32 dco_fraction = pll_params->dco_fraction;
3074 memset(pll_state, 0, sizeof(*pll_state));
/* Display WA #22010492432 (TGL + 38.4 MHz): program half the DCO fraction. */
3076 if (tgl_combo_pll_div_frac_wa_needed(i915))
3077 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
3079 pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
3080 pll_params->dco_integer;
3082 pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
3083 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
3084 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
3085 DPLL_CFGCR1_PDIV(pll_params->pdiv);
/*
 * NOTE(review): the "else" line between the two branches below is elided
 * in this sampled listing.
 */
3087 if (INTEL_GEN(i915) >= 12)
3088 pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
3090 pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
/* MG PLL ids map 1:1 onto TC ports, offset by DPLL_ID_ICL_MGPLL1. */
3093 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
3095 return id - DPLL_ID_ICL_MGPLL1;
/* Inverse of icl_pll_id_to_tc_port(): TC port -> MG PLL id. */
3098 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
3100 return tc_port + DPLL_ID_ICL_MGPLL1;
3103 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3104 u32 *target_dco_khz,
3105 struct intel_dpll_hw_state *state,
3108 u32 dco_min_freq, dco_max_freq;
3109 int div1_vals[] = {7, 5, 3, 2};
3113 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3114 dco_max_freq = is_dp ? 8100000 : 10000000;
3116 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3117 int div1 = div1_vals[i];
3119 for (div2 = 10; div2 > 0; div2--) {
3120 int dco = div1 * div2 * clock_khz * 5;
3121 int a_divratio, tlinedrv, inputsel;
3124 if (dco < dco_min_freq || dco > dco_max_freq)
3129 * Note: a_divratio not matching TGL BSpec
3130 * algorithm but matching hardcoded values and
3131 * working on HW for DP alt-mode at least
3133 a_divratio = is_dp ? 10 : 5;
3134 tlinedrv = is_dkl ? 1 : 2;
3139 inputsel = is_dp ? 0 : 1;
3146 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3149 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3152 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3155 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3159 *target_dco_khz = dco;
3161 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3163 state->mg_clktop2_coreclkctl1 =
3164 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3166 state->mg_clktop2_hsclkctl =
3167 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3168 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3170 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3180 * The specification for this function uses real numbers, so the math had to be
3181 * adapted to integer-only calculation, that's why it looks so different.
3183 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3184 struct intel_dpll_hw_state *pll_state)
3186 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3187 int refclk_khz = dev_priv->dpll.ref_clks.nssc;
3188 int clock = crtc_state->port_clock;
3189 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3190 u32 iref_ndiv, iref_trim, iref_pulse_w;
3191 u32 prop_coeff, int_coeff;
3192 u32 tdc_targetcnt, feedfwgain;
3193 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3195 bool use_ssc = false;
3196 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3197 bool is_dkl = INTEL_GEN(dev_priv) >= 12;
3199 memset(pll_state, 0, sizeof(*pll_state));
3201 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3202 pll_state, is_dkl)) {
3203 drm_dbg_kms(&dev_priv->drm,
3204 "Failed to find divisors for clock %d\n", clock);
3209 m2div_int = dco_khz / (refclk_khz * m1div);
3210 if (m2div_int > 255) {
3213 m2div_int = dco_khz / (refclk_khz * m1div);
3216 if (m2div_int > 255) {
3217 drm_dbg_kms(&dev_priv->drm,
3218 "Failed to find mdiv for clock %d\n",
3223 m2div_rem = dco_khz % (refclk_khz * m1div);
3225 tmp = (u64)m2div_rem * (1 << 22);
3226 do_div(tmp, refclk_khz * m1div);
3229 switch (refclk_khz) {
3246 MISSING_CASE(refclk_khz);
3251 * tdc_res = 0.000003
3252 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3254 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3255 * was supposed to be a division, but we rearranged the operations of
3256 * the formula to avoid early divisions so we don't multiply the
3259 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3260 * we also rearrange to work with integers.
3262 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3263 * last division by 10.
3265 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3268 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3269 * 32 bits. That's not a problem since we round the division down
3272 feedfwgain = (use_ssc || m2div_rem > 0) ?
3273 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3275 if (dco_khz >= 9000000) {
3284 tmp = mul_u32_u32(dco_khz, 47 * 32);
3285 do_div(tmp, refclk_khz * m1div * 10000);
3288 tmp = mul_u32_u32(dco_khz, 1000);
3289 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3296 /* write pll_state calculations */
3298 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3299 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3300 DKL_PLL_DIV0_FBPREDIV(m1div) |
3301 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3303 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3304 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3306 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3307 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3308 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3309 (use_ssc ? DKL_PLL_SSC_EN : 0);
3311 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3312 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3314 pll_state->mg_pll_tdc_coldst_bias =
3315 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3316 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3319 pll_state->mg_pll_div0 =
3320 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3321 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3322 MG_PLL_DIV0_FBDIV_INT(m2div_int);
3324 pll_state->mg_pll_div1 =
3325 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3326 MG_PLL_DIV1_DITHER_DIV_2 |
3327 MG_PLL_DIV1_NDIVRATIO(1) |
3328 MG_PLL_DIV1_FBPREDIV(m1div);
3330 pll_state->mg_pll_lf =
3331 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3332 MG_PLL_LF_AFCCNTSEL_512 |
3333 MG_PLL_LF_GAINCTRL(1) |
3334 MG_PLL_LF_INT_COEFF(int_coeff) |
3335 MG_PLL_LF_PROP_COEFF(prop_coeff);
3337 pll_state->mg_pll_frac_lock =
3338 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3339 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3340 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3341 MG_PLL_FRAC_LOCK_DCODITHEREN |
3342 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3343 if (use_ssc || m2div_rem > 0)
3344 pll_state->mg_pll_frac_lock |=
3345 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3347 pll_state->mg_pll_ssc =
3348 (use_ssc ? MG_PLL_SSC_EN : 0) |
3349 MG_PLL_SSC_TYPE(2) |
3350 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3351 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3353 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3355 pll_state->mg_pll_tdc_coldst_bias =
3356 MG_PLL_TDC_COLDST_COLDSTART |
3357 MG_PLL_TDC_COLDST_IREFINT_EN |
3358 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3359 MG_PLL_TDC_TDCOVCCORR_EN |
3360 MG_PLL_TDC_TDCSEL(3);
3362 pll_state->mg_pll_bias =
3363 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3364 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3365 MG_PLL_BIAS_BIAS_BONUS(10) |
3366 MG_PLL_BIAS_BIASCAL_EN |
3367 MG_PLL_BIAS_CTRIM(12) |
3368 MG_PLL_BIAS_VREF_RDAC(4) |
3369 MG_PLL_BIAS_IREFTRIM(iref_trim);
3371 if (refclk_khz == 38400) {
3372 pll_state->mg_pll_tdc_coldst_bias_mask =
3373 MG_PLL_TDC_COLDST_COLDSTART;
3374 pll_state->mg_pll_bias_mask = 0;
3376 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3377 pll_state->mg_pll_bias_mask = -1U;
3380 pll_state->mg_pll_tdc_coldst_bias &=
3381 pll_state->mg_pll_tdc_coldst_bias_mask;
3382 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3388 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3389 const struct intel_shared_dpll *pll)
3391 const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
3392 u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3395 ref_clock = dev_priv->dpll.ref_clks.nssc;
3397 if (INTEL_GEN(dev_priv) >= 12) {
3398 m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3399 m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3400 m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3402 if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3403 m2_frac = pll_state->mg_pll_bias &
3404 DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3405 m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3410 m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3411 m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3413 if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3414 m2_frac = pll_state->mg_pll_div0 &
3415 MG_PLL_DIV0_FBDIV_FRAC_MASK;
3416 m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3422 switch (pll_state->mg_clktop2_hsclkctl &
3423 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3424 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3427 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3430 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3433 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3437 MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3441 div2 = (pll_state->mg_clktop2_hsclkctl &
3442 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3443 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3445 /* div2 value of 0 is same as 1 means no div */
3450 * Adjust the original formula to delay the division by 2^22 in order to
3451 * minimize possible rounding errors.
3453 tmp = (u64)m1 * m2_int * ref_clock +
3454 (((u64)m1 * m2_frac * ref_clock) >> 22);
3455 tmp = div_u64(tmp, 5 * div1 * div2);
3461 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3462 * @crtc_state: state for the CRTC to select the DPLL for
3463 * @port_dpll_id: the active @port_dpll_id to select
3465 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3468 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3469 enum icl_port_dpll_id port_dpll_id)
3471 struct icl_port_dpll *port_dpll =
3472 &crtc_state->icl_port_dplls[port_dpll_id];
/* Copy the chosen reserved PLL and its hw state into the CRTC state. */
3474 crtc_state->shared_dpll = port_dpll->pll;
3475 crtc_state->dpll_hw_state = port_dpll->hw_state;
/*
 * Pick which of the reserved port DPLLs (default/TBT vs MG PHY) should
 * be active for this CRTC+encoder: the MG PHY PLL is selected when the
 * primary Type-C port is in DP-alt or legacy mode. For DP-MST the
 * primary encoder's digital port is used.
 * NOTE(review): the first line of the if-condition guarding the tc_mode
 * checks is elided in this sampled listing.
 */
3478 static void icl_update_active_dpll(struct intel_atomic_state *state,
3479 struct intel_crtc *crtc,
3480 struct intel_encoder *encoder)
3482 struct intel_crtc_state *crtc_state =
3483 intel_atomic_get_new_crtc_state(state, crtc);
3484 struct intel_digital_port *primary_port;
3485 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3487 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3488 enc_to_mst(encoder)->primary :
3489 enc_to_dig_port(encoder);
3492 (primary_port->tc_mode == TC_PORT_DP_ALT ||
3493 primary_port->tc_mode == TC_PORT_LEGACY))
3494 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3496 icl_set_active_port_dpll(crtc_state, port_dpll_id);
/*
 * Return the bitmask of DPLLs claimed by HTI (HDPORT), zero when HDPORT
 * is not enabled.
 * NOTE(review): the early "return 0;" line after the check is elided in
 * this sampled listing.
 */
3499 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3501 if (!(i915->hti_state & HDPORT_ENABLED))
3504 return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3507 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3508 struct intel_crtc *crtc,
3509 struct intel_encoder *encoder)
3511 struct intel_crtc_state *crtc_state =
3512 intel_atomic_get_new_crtc_state(state, crtc);
3513 struct skl_wrpll_params pll_params = { };
3514 struct icl_port_dpll *port_dpll =
3515 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3516 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3517 enum port port = encoder->port;
3518 unsigned long dpll_mask;
3521 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3522 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3523 ret = icl_calc_wrpll(crtc_state, &pll_params);
3525 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3528 drm_dbg_kms(&dev_priv->drm,
3529 "Could not calculate combo PHY PLL state.\n");
3534 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3536 if (IS_ROCKETLAKE(dev_priv)) {
3538 BIT(DPLL_ID_EHL_DPLL4) |
3539 BIT(DPLL_ID_ICL_DPLL1) |
3540 BIT(DPLL_ID_ICL_DPLL0);
3541 } else if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) {
3543 BIT(DPLL_ID_EHL_DPLL4) |
3544 BIT(DPLL_ID_ICL_DPLL1) |
3545 BIT(DPLL_ID_ICL_DPLL0);
3547 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3550 /* Eliminate DPLLs from consideration if reserved by HTI */
3551 dpll_mask &= ~intel_get_hti_plls(dev_priv);
3553 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3554 &port_dpll->hw_state,
3556 if (!port_dpll->pll) {
3557 drm_dbg_kms(&dev_priv->drm,
3558 "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3559 encoder->base.base.id, encoder->base.name);
3563 intel_reference_shared_dpll(state, crtc,
3564 port_dpll->pll, &port_dpll->hw_state);
3566 icl_update_active_dpll(state, crtc, encoder);
3571 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3572 struct intel_crtc *crtc,
3573 struct intel_encoder *encoder)
3575 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3576 struct intel_crtc_state *crtc_state =
3577 intel_atomic_get_new_crtc_state(state, crtc);
3578 struct skl_wrpll_params pll_params = { };
3579 struct icl_port_dpll *port_dpll;
3580 enum intel_dpll_id dpll_id;
3582 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3583 if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
3584 drm_dbg_kms(&dev_priv->drm,
3585 "Could not calculate TBT PLL state.\n");
3589 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3591 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3592 &port_dpll->hw_state,
3593 BIT(DPLL_ID_ICL_TBTPLL));
3594 if (!port_dpll->pll) {
3595 drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
3598 intel_reference_shared_dpll(state, crtc,
3599 port_dpll->pll, &port_dpll->hw_state);
3602 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3603 if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
3604 drm_dbg_kms(&dev_priv->drm,
3605 "Could not calculate MG PHY PLL state.\n");
3606 goto err_unreference_tbt_pll;
3609 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3611 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3612 &port_dpll->hw_state,
3614 if (!port_dpll->pll) {
3615 drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
3616 goto err_unreference_tbt_pll;
3618 intel_reference_shared_dpll(state, crtc,
3619 port_dpll->pll, &port_dpll->hw_state);
3621 icl_update_active_dpll(state, crtc, encoder);
3625 err_unreference_tbt_pll:
3626 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3627 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
/*
 * Reserve DPLLs for a CRTC on ICL+: dispatch on the encoder's PHY type
 * (combo PHY vs Type-C PHY).
 * NOTE(review): the handling for a PHY that is neither combo nor TC is
 * elided in this sampled listing.
 */
3632 static bool icl_get_dplls(struct intel_atomic_state *state,
3633 struct intel_crtc *crtc,
3634 struct intel_encoder *encoder)
3636 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3637 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3639 if (intel_phy_is_combo(dev_priv, phy))
3640 return icl_get_combo_phy_dpll(state, crtc, encoder);
3641 else if (intel_phy_is_tc(dev_priv, phy))
3642 return icl_get_tc_phy_dplls(state, crtc, encoder);
/*
 * Release all port DPLL references held by the old CRTC state and clear
 * the PLL pointers in the new state (both shared_dpll and each entry of
 * icl_port_dplls[]).
 * NOTE(review): the "continue;" inside the loop is elided in this
 * sampled listing.
 */
3649 static void icl_put_dplls(struct intel_atomic_state *state,
3650 struct intel_crtc *crtc)
3652 const struct intel_crtc_state *old_crtc_state =
3653 intel_atomic_get_old_crtc_state(state, crtc);
3654 struct intel_crtc_state *new_crtc_state =
3655 intel_atomic_get_new_crtc_state(state, crtc);
3656 enum icl_port_dpll_id id;
3658 new_crtc_state->shared_dpll = NULL;
3660 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3661 const struct icl_port_dpll *old_port_dpll =
3662 &old_crtc_state->icl_port_dplls[id];
3663 struct icl_port_dpll *new_port_dpll =
3664 &new_crtc_state->icl_port_dplls[id];
3666 new_port_dpll->pll = NULL;
/* Slots that held no PLL in the old state need no unreference. */
3668 if (!old_port_dpll->pll)
3671 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3675 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3676 struct intel_shared_dpll *pll,
3677 struct intel_dpll_hw_state *hw_state)
3679 const enum intel_dpll_id id = pll->info->id;
3680 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3681 intel_wakeref_t wakeref;
3685 wakeref = intel_display_power_get_if_enabled(dev_priv,
3686 POWER_DOMAIN_DISPLAY_CORE);
3690 val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
3691 if (!(val & PLL_ENABLE))
3694 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3695 MG_REFCLKIN_CTL(tc_port));
3696 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3698 hw_state->mg_clktop2_coreclkctl1 =
3699 intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3700 hw_state->mg_clktop2_coreclkctl1 &=
3701 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3703 hw_state->mg_clktop2_hsclkctl =
3704 intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3705 hw_state->mg_clktop2_hsclkctl &=
3706 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3707 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3708 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3709 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3711 hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3712 hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3713 hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3714 hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3715 MG_PLL_FRAC_LOCK(tc_port));
3716 hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3718 hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3719 hw_state->mg_pll_tdc_coldst_bias =
3720 intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3722 if (dev_priv->dpll.ref_clks.nssc == 38400) {
3723 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3724 hw_state->mg_pll_bias_mask = 0;
3726 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3727 hw_state->mg_pll_bias_mask = -1U;
3730 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3731 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3735 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3739 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3740 struct intel_shared_dpll *pll,
3741 struct intel_dpll_hw_state *hw_state)
3743 const enum intel_dpll_id id = pll->info->id;
3744 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3745 intel_wakeref_t wakeref;
3749 wakeref = intel_display_power_get_if_enabled(dev_priv,
3750 POWER_DOMAIN_DISPLAY_CORE);
3754 val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
3755 if (!(val & PLL_ENABLE))
3759 * All registers read here have the same HIP_INDEX_REG even though
3760 * they are on different building blocks
3762 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3763 HIP_INDEX_VAL(tc_port, 0x2));
3765 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3766 DKL_REFCLKIN_CTL(tc_port));
3767 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3769 hw_state->mg_clktop2_hsclkctl =
3770 intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3771 hw_state->mg_clktop2_hsclkctl &=
3772 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3773 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3774 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3775 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3777 hw_state->mg_clktop2_coreclkctl1 =
3778 intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3779 hw_state->mg_clktop2_coreclkctl1 &=
3780 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3782 hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3783 hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
3784 DKL_PLL_DIV0_PROP_COEFF_MASK |
3785 DKL_PLL_DIV0_FBPREDIV_MASK |
3786 DKL_PLL_DIV0_FBDIV_INT_MASK);
3788 hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3789 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3790 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3792 hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
3793 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3794 DKL_PLL_SSC_STEP_LEN_MASK |
3795 DKL_PLL_SSC_STEP_NUM_MASK |
3798 hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
3799 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3800 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3802 hw_state->mg_pll_tdc_coldst_bias =
3803 intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3804 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3805 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3809 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3813 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3814 struct intel_shared_dpll *pll,
3815 struct intel_dpll_hw_state *hw_state,
3816 i915_reg_t enable_reg)
3818 const enum intel_dpll_id id = pll->info->id;
3819 intel_wakeref_t wakeref;
3823 wakeref = intel_display_power_get_if_enabled(dev_priv,
3824 POWER_DOMAIN_DISPLAY_CORE);
3828 val = intel_de_read(dev_priv, enable_reg);
3829 if (!(val & PLL_ENABLE))
3832 if (IS_ROCKETLAKE(dev_priv)) {
3833 hw_state->cfgcr0 = intel_de_read(dev_priv,
3834 RKL_DPLL_CFGCR0(id));
3835 hw_state->cfgcr1 = intel_de_read(dev_priv,
3836 RKL_DPLL_CFGCR1(id));
3837 } else if (INTEL_GEN(dev_priv) >= 12) {
3838 hw_state->cfgcr0 = intel_de_read(dev_priv,
3839 TGL_DPLL_CFGCR0(id));
3840 hw_state->cfgcr1 = intel_de_read(dev_priv,
3841 TGL_DPLL_CFGCR1(id));
3843 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3844 hw_state->cfgcr0 = intel_de_read(dev_priv,
3845 ICL_DPLL_CFGCR0(4));
3846 hw_state->cfgcr1 = intel_de_read(dev_priv,
3847 ICL_DPLL_CFGCR1(4));
3849 hw_state->cfgcr0 = intel_de_read(dev_priv,
3850 ICL_DPLL_CFGCR0(id));
3851 hw_state->cfgcr1 = intel_de_read(dev_priv,
3852 ICL_DPLL_CFGCR1(id));
3858 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3862 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3863 struct intel_shared_dpll *pll,
3864 struct intel_dpll_hw_state *hw_state)
3866 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3868 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3871 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3872 struct intel_shared_dpll *pll,
3873 struct intel_dpll_hw_state *hw_state)
3875 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3878 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3879 struct intel_shared_dpll *pll)
3881 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3882 const enum intel_dpll_id id = pll->info->id;
3883 i915_reg_t cfgcr0_reg, cfgcr1_reg;
3885 if (IS_ROCKETLAKE(dev_priv)) {
3886 cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3887 cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3888 } else if (INTEL_GEN(dev_priv) >= 12) {
3889 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3890 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3892 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3893 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3894 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3896 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3897 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3901 intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3902 intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3903 intel_de_posting_read(dev_priv, cfgcr1_reg);
3906 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3907 struct intel_shared_dpll *pll)
3909 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3910 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3914 * Some of the following registers have reserved fields, so program
3915 * these with RMW based on a mask. The mask can be fixed or generated
3916 * during the calc/readout phase if the mask depends on some other HW
3917 * state like refclk, see icl_calc_mg_pll_state().
3919 val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3920 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3921 val |= hw_state->mg_refclkin_ctl;
3922 intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3924 val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3925 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3926 val |= hw_state->mg_clktop2_coreclkctl1;
3927 intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3929 val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3930 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3931 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3932 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3933 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3934 val |= hw_state->mg_clktop2_hsclkctl;
3935 intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
3937 intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3938 intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3939 intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3940 intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3941 hw_state->mg_pll_frac_lock);
3942 intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3944 val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3945 val &= ~hw_state->mg_pll_bias_mask;
3946 val |= hw_state->mg_pll_bias;
3947 intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
3949 val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3950 val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3951 val |= hw_state->mg_pll_tdc_coldst_bias;
3952 intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3954 intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3957 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3958 struct intel_shared_dpll *pll)
3960 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3961 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3965 * All registers programmed here have the same HIP_INDEX_REG even
3966 * though on different building block
3968 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3969 HIP_INDEX_VAL(tc_port, 0x2));
3971 /* All the registers are RMW */
3972 val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3973 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3974 val |= hw_state->mg_refclkin_ctl;
3975 intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3977 val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3978 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3979 val |= hw_state->mg_clktop2_coreclkctl1;
3980 intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3982 val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3983 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3984 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3985 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3986 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3987 val |= hw_state->mg_clktop2_hsclkctl;
3988 intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3990 val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3991 val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
3992 DKL_PLL_DIV0_PROP_COEFF_MASK |
3993 DKL_PLL_DIV0_FBPREDIV_MASK |
3994 DKL_PLL_DIV0_FBDIV_INT_MASK);
3995 val |= hw_state->mg_pll_div0;
3996 intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
3998 val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3999 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
4000 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
4001 val |= hw_state->mg_pll_div1;
4002 intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
4004 val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
4005 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
4006 DKL_PLL_SSC_STEP_LEN_MASK |
4007 DKL_PLL_SSC_STEP_NUM_MASK |
4009 val |= hw_state->mg_pll_ssc;
4010 intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
4012 val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
4013 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
4014 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
4015 val |= hw_state->mg_pll_bias;
4016 intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
4018 val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
4019 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
4020 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
4021 val |= hw_state->mg_pll_tdc_coldst_bias;
4022 intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
4024 intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
4027 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4028 struct intel_shared_dpll *pll,
4029 i915_reg_t enable_reg)
4033 val = intel_de_read(dev_priv, enable_reg);
4034 val |= PLL_POWER_ENABLE;
4035 intel_de_write(dev_priv, enable_reg, val);
4038 * The spec says we need to "wait" but it also says it should be
4041 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4042 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4046 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4047 struct intel_shared_dpll *pll,
4048 i915_reg_t enable_reg)
4052 val = intel_de_read(dev_priv, enable_reg);
4054 intel_de_write(dev_priv, enable_reg, val);
4056 /* Timeout is actually 600us. */
4057 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4058 drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4061 static void combo_pll_enable(struct drm_i915_private *dev_priv,
4062 struct intel_shared_dpll *pll)
4064 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4066 if (IS_ELKHARTLAKE(dev_priv) &&
4067 pll->info->id == DPLL_ID_EHL_DPLL4) {
4070 * We need to disable DC states when this DPLL is enabled.
4071 * This can be done by taking a reference on DPLL4 power
4074 pll->wakeref = intel_display_power_get(dev_priv,
4075 POWER_DOMAIN_DPLL_DC_OFF);
4078 icl_pll_power_enable(dev_priv, pll, enable_reg);
4080 icl_dpll_write(dev_priv, pll);
4083 * DVFS pre sequence would be here, but in our driver the cdclk code
4084 * paths should already be setting the appropriate voltage, hence we do
4088 icl_pll_enable(dev_priv, pll, enable_reg);
4090 /* DVFS post sequence would be here. See the comment above. */
4093 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4094 struct intel_shared_dpll *pll)
4096 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4098 icl_dpll_write(dev_priv, pll);
4101 * DVFS pre sequence would be here, but in our driver the cdclk code
4102 * paths should already be setting the appropriate voltage, hence we do
4106 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4108 /* DVFS post sequence would be here. See the comment above. */
4111 static void mg_pll_enable(struct drm_i915_private *dev_priv,
4112 struct intel_shared_dpll *pll)
4114 i915_reg_t enable_reg =
4115 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4117 icl_pll_power_enable(dev_priv, pll, enable_reg);
4119 if (INTEL_GEN(dev_priv) >= 12)
4120 dkl_pll_write(dev_priv, pll);
4122 icl_mg_pll_write(dev_priv, pll);
4125 * DVFS pre sequence would be here, but in our driver the cdclk code
4126 * paths should already be setting the appropriate voltage, hence we do
4130 icl_pll_enable(dev_priv, pll, enable_reg);
4132 /* DVFS post sequence would be here. See the comment above. */
4135 static void icl_pll_disable(struct drm_i915_private *dev_priv,
4136 struct intel_shared_dpll *pll,
4137 i915_reg_t enable_reg)
4141 /* The first steps are done by intel_ddi_post_disable(). */
4144 * DVFS pre sequence would be here, but in our driver the cdclk code
4145 * paths should already be setting the appropriate voltage, hence we do
4149 val = intel_de_read(dev_priv, enable_reg);
4151 intel_de_write(dev_priv, enable_reg, val);
4153 /* Timeout is actually 1us. */
4154 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
4155 drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
4157 /* DVFS post sequence would be here. See the comment above. */
4159 val = intel_de_read(dev_priv, enable_reg);
4160 val &= ~PLL_POWER_ENABLE;
4161 intel_de_write(dev_priv, enable_reg, val);
4164 * The spec says we need to "wait" but it also says it should be
4167 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4168 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
4172 static void combo_pll_disable(struct drm_i915_private *dev_priv,
4173 struct intel_shared_dpll *pll)
4175 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4177 icl_pll_disable(dev_priv, pll, enable_reg);
4179 if (IS_ELKHARTLAKE(dev_priv) &&
4180 pll->info->id == DPLL_ID_EHL_DPLL4)
4181 intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4185 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
4186 struct intel_shared_dpll *pll)
4188 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
4191 static void mg_pll_disable(struct drm_i915_private *dev_priv,
4192 struct intel_shared_dpll *pll)
4194 i915_reg_t enable_reg =
4195 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4197 icl_pll_disable(dev_priv, pll, enable_reg);
4200 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4203 i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
4206 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4207 const struct intel_dpll_hw_state *hw_state)
4209 drm_dbg_kms(&dev_priv->drm,
4210 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4211 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4212 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4213 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4214 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4215 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4216 hw_state->cfgcr0, hw_state->cfgcr1,
4217 hw_state->mg_refclkin_ctl,
4218 hw_state->mg_clktop2_coreclkctl1,
4219 hw_state->mg_clktop2_hsclkctl,
4220 hw_state->mg_pll_div0,
4221 hw_state->mg_pll_div1,
4222 hw_state->mg_pll_lf,
4223 hw_state->mg_pll_frac_lock,
4224 hw_state->mg_pll_ssc,
4225 hw_state->mg_pll_bias,
4226 hw_state->mg_pll_tdc_coldst_bias);
4229 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4230 .enable = combo_pll_enable,
4231 .disable = combo_pll_disable,
4232 .get_hw_state = combo_pll_get_hw_state,
4233 .get_freq = icl_ddi_combo_pll_get_freq,
4236 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4237 .enable = tbt_pll_enable,
4238 .disable = tbt_pll_disable,
4239 .get_hw_state = tbt_pll_get_hw_state,
4240 .get_freq = icl_ddi_tbt_pll_get_freq,
4243 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4244 .enable = mg_pll_enable,
4245 .disable = mg_pll_disable,
4246 .get_hw_state = mg_pll_get_hw_state,
4247 .get_freq = icl_ddi_mg_pll_get_freq,
4250 static const struct dpll_info icl_plls[] = {
4251 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4252 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4253 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4254 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4255 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4256 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4257 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4261 static const struct intel_dpll_mgr icl_pll_mgr = {
4262 .dpll_info = icl_plls,
4263 .get_dplls = icl_get_dplls,
4264 .put_dplls = icl_put_dplls,
4265 .update_active_dpll = icl_update_active_dpll,
4266 .update_ref_clks = icl_update_dpll_ref_clks,
4267 .dump_hw_state = icl_dump_hw_state,
4270 static const struct dpll_info ehl_plls[] = {
4271 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4272 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4273 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4277 static const struct intel_dpll_mgr ehl_pll_mgr = {
4278 .dpll_info = ehl_plls,
4279 .get_dplls = icl_get_dplls,
4280 .put_dplls = icl_put_dplls,
4281 .update_ref_clks = icl_update_dpll_ref_clks,
4282 .dump_hw_state = icl_dump_hw_state,
4285 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4286 .enable = mg_pll_enable,
4287 .disable = mg_pll_disable,
4288 .get_hw_state = dkl_pll_get_hw_state,
4289 .get_freq = icl_ddi_mg_pll_get_freq,
4292 static const struct dpll_info tgl_plls[] = {
4293 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4294 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4295 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4296 { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4297 { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4298 { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4299 { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4300 { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4301 { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4305 static const struct intel_dpll_mgr tgl_pll_mgr = {
4306 .dpll_info = tgl_plls,
4307 .get_dplls = icl_get_dplls,
4308 .put_dplls = icl_put_dplls,
4309 .update_active_dpll = icl_update_active_dpll,
4310 .update_ref_clks = icl_update_dpll_ref_clks,
4311 .dump_hw_state = icl_dump_hw_state,
4314 static const struct dpll_info rkl_plls[] = {
4315 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4316 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4317 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4321 static const struct intel_dpll_mgr rkl_pll_mgr = {
4322 .dpll_info = rkl_plls,
4323 .get_dplls = icl_get_dplls,
4324 .put_dplls = icl_put_dplls,
4325 .update_ref_clks = icl_update_dpll_ref_clks,
4326 .dump_hw_state = icl_dump_hw_state,
4330 * intel_shared_dpll_init - Initialize shared DPLLs
4333 * Initialize shared DPLLs for @dev.
4335 void intel_shared_dpll_init(struct drm_device *dev)
4337 struct drm_i915_private *dev_priv = to_i915(dev);
4338 const struct intel_dpll_mgr *dpll_mgr = NULL;
4339 const struct dpll_info *dpll_info;
4342 if (IS_ROCKETLAKE(dev_priv))
4343 dpll_mgr = &rkl_pll_mgr;
4344 else if (INTEL_GEN(dev_priv) >= 12)
4345 dpll_mgr = &tgl_pll_mgr;
4346 else if (IS_ELKHARTLAKE(dev_priv))
4347 dpll_mgr = &ehl_pll_mgr;
4348 else if (INTEL_GEN(dev_priv) >= 11)
4349 dpll_mgr = &icl_pll_mgr;
4350 else if (IS_CANNONLAKE(dev_priv))
4351 dpll_mgr = &cnl_pll_mgr;
4352 else if (IS_GEN9_BC(dev_priv))
4353 dpll_mgr = &skl_pll_mgr;
4354 else if (IS_GEN9_LP(dev_priv))
4355 dpll_mgr = &bxt_pll_mgr;
4356 else if (HAS_DDI(dev_priv))
4357 dpll_mgr = &hsw_pll_mgr;
4358 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4359 dpll_mgr = &pch_pll_mgr;
4362 dev_priv->dpll.num_shared_dpll = 0;
4366 dpll_info = dpll_mgr->dpll_info;
4368 for (i = 0; dpll_info[i].name; i++) {
4369 drm_WARN_ON(dev, i != dpll_info[i].id);
4370 dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4373 dev_priv->dpll.mgr = dpll_mgr;
4374 dev_priv->dpll.num_shared_dpll = i;
4375 mutex_init(&dev_priv->dpll.lock);
4377 BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4381 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4382 * @state: atomic state
4383 * @crtc: CRTC to reserve DPLLs for
4386 * This function reserves all required DPLLs for the given CRTC and encoder
4387 * combination in the current atomic commit @state and the new @crtc atomic
4390 * The new configuration in the atomic commit @state is made effective by
4391 * calling intel_shared_dpll_swap_state().
4393 * The reserved DPLLs should be released by calling
4394 * intel_release_shared_dplls().
4397 * True if all required DPLLs were successfully reserved.
4399 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4400 struct intel_crtc *crtc,
4401 struct intel_encoder *encoder)
4403 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4404 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4406 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4409 return dpll_mgr->get_dplls(state, crtc, encoder);
4413 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4414 * @state: atomic state
4415 * @crtc: crtc from which the DPLLs are to be released
4417 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4418 * from the current atomic commit @state and the old @crtc atomic state.
4420 * The new configuration in the atomic commit @state is made effective by
4421 * calling intel_shared_dpll_swap_state().
4423 void intel_release_shared_dplls(struct intel_atomic_state *state,
4424 struct intel_crtc *crtc)
4426 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4427 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4430 * FIXME: this function is called for every platform having a
4431 * compute_clock hook, even though the platform doesn't yet support
4432 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4438 dpll_mgr->put_dplls(state, crtc);
4442 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4443 * @state: atomic state
4444 * @crtc: the CRTC for which to update the active DPLL
4445 * @encoder: encoder determining the type of port DPLL
4447 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4448 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4449 * DPLL selected will be based on the current mode of the encoder's port.
4451 void intel_update_active_dpll(struct intel_atomic_state *state,
4452 struct intel_crtc *crtc,
4453 struct intel_encoder *encoder)
4455 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4456 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4458 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4461 dpll_mgr->update_active_dpll(state, crtc, encoder);
4465 * intel_dpll_get_freq - calculate the DPLL's output frequency
4466 * @i915: i915 device
4467 * @pll: DPLL for which to calculate the output frequency
4469 * Return the output frequency corresponding to @pll's current state.
4471 int intel_dpll_get_freq(struct drm_i915_private *i915,
4472 const struct intel_shared_dpll *pll)
4474 if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4477 return pll->info->funcs->get_freq(i915, pll);
4480 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4481 struct intel_shared_dpll *pll)
4483 struct intel_crtc *crtc;
4485 pll->on = pll->info->funcs->get_hw_state(i915, pll,
4486 &pll->state.hw_state);
4488 if (IS_ELKHARTLAKE(i915) && pll->on &&
4489 pll->info->id == DPLL_ID_EHL_DPLL4) {
4490 pll->wakeref = intel_display_power_get(i915,
4491 POWER_DOMAIN_DPLL_DC_OFF);
4494 pll->state.crtc_mask = 0;
4495 for_each_intel_crtc(&i915->drm, crtc) {
4496 struct intel_crtc_state *crtc_state =
4497 to_intel_crtc_state(crtc->base.state);
4499 if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4500 pll->state.crtc_mask |= 1 << crtc->pipe;
4502 pll->active_mask = pll->state.crtc_mask;
4504 drm_dbg_kms(&i915->drm,
4505 "%s hw state readout: crtc_mask 0x%08x, on %i\n",
4506 pll->info->name, pll->state.crtc_mask, pll->on);
4509 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4513 if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4514 i915->dpll.mgr->update_ref_clks(i915);
4516 for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4517 readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4520 static void sanitize_dpll_state(struct drm_i915_private *i915,
4521 struct intel_shared_dpll *pll)
4523 if (!pll->on || pll->active_mask)
4526 drm_dbg_kms(&i915->drm,
4527 "%s enabled but not in use, disabling\n",
4530 pll->info->funcs->disable(i915, pll);
4534 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4538 for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4539 sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
 * intel_dpll_dump_hw_state - write hw_state to dmesg
4544 * @dev_priv: i915 drm device
4545 * @hw_state: hw state to be written to the log
4547 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4549 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4550 const struct intel_dpll_hw_state *hw_state)
4552 if (dev_priv->dpll.mgr) {
4553 dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4555 /* fallback for platforms that don't use the shared dpll
4558 drm_dbg_kms(&dev_priv->drm,
4559 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4560 "fp0: 0x%x, fp1: 0x%x\n",