/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while in this stage. This
 * stage is entered automatically when the GPU is idle (provided RC6 support
 * is enabled), and the GPU wakes up automatically as soon as a new workload
 * arises.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
 * one which brings the most power savings; deeper states save more power,
 * but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)
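
/*
 * Illustrative example (not a value taken from the driver): a platform
 * wanting plain RC6 together with deep RC6, but not deepest RC6, would
 * express that as the mask (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE).
 */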
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/*
	 * Display WA#0390: skl,bxt,kbl,glk
	 *
	 * Must match Sampler, Pixel Back End, and Media
	 * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31).
	 *
	 * Including bits outside the page in the hash would
	 * require 2 (or 4?) MiB alignment of resources. Just
	 * assume the default hashing mode which only uses bits
	 * within the page.
	 */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);

	if (IS_SKYLAKE(dev_priv)) {
		/* WaDisableDopClockGating */
		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	}
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when the DSI port PLL
	 * is off and an MMIO access is attempted by any privileged
	 * application, using batch buffers or any other means.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);

		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
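
/*
 * Illustrative expansion: FW_WM(0x15, SR) becomes
 * ((0x15 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the watermark value
 * is shifted into the SR field and clamped by the field mask so it
 * cannot spill into neighbouring fields of the DSPFW register.
 */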
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
		      enableddisabled(enable),
		      enableddisabled(was_enabled));

	return was_enabled;
}
/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self-refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
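
/*
 * Typical call pattern around a plane update, per the comment above
 * (a sketch only; the real sequencing is handled by the atomic code,
 * and intel_wait_for_vblank() is used here purely for illustration):
 *
 *	intel_set_memory_cxsr(dev_priv, false);
 *	intel_wait_for_vblank(dev_priv, pipe);
 *	... write the plane registers ...
 *	intel_set_memory_cxsr(dev_priv, true);
 */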
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
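
/*
 * Worked example (illustrative register values): with dsparb = 0x44
 * and dsparb2 = 0x1, VLV_FIFO_START(dsparb, dsparb2, 0, 0) yields
 * 0x44 | (0x1 << 8) = 0x144, i.e. the 9-bit FIFO start point is
 * assembled from 8 low bits in DSPARB and one high bit in DSPARB2.
 */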
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	uint64_t ret;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
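
/*
 * Worked example (illustrative numbers, not from Bspec): a 1920x1080@60
 * mode has a pixel rate of roughly 148500 kHz. With cpp = 4 and a 5 usec
 * wakeup latency (latency = 50 in 0.1us units), method 1 gives
 * 148500 * 4 * 50 / 10000 = 2970 bytes, i.e. the FIFO must hold ~2970
 * bytes to ride out the memory wakeup at the instantaneous drain rate.
 */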
/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
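
/*
 * Worked example (illustrative numbers, not from Bspec): the same
 * 148500 kHz mode with htotal = 2200, width = 1920, cpp = 4 and
 * latency = 50 gives (50 * 148500) / (2200 * 10000) = 0 whole lines,
 * so the result is (0 + 1) * 1920 * 4 = 7680 bytes; the '+ 1' rounds
 * up to the next whole line since the FIFO drains line by line.
 */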
/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
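
/*
 * Worked example (illustrative numbers, not from Bspec): with
 * pixel_rate = 148500, cpp = 4, latency_ns = 5000 and a 64-byte
 * cacheline, method 1 yields 2970 bytes = 47 cachelines; adding a
 * guard of 2 gives 49 entries, so a 512-entry FIFO would end up with
 * a watermark level of 512 - 49 = 463 (before the max_wm clamp).
 */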
static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}
static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->base.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->base.fb != NULL;
	else
		return plane_state->base.visible;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
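
/*
 * Worked example (illustrative numbers): a 127-cacheline FIFO holds
 * 127 * 64 = 8128 bytes, while eight whole lines of a 1920-wide
 * 4-byte/px plane need 1920 * 4 * 8 = 61440 bytes, so tlb_miss is
 * negative and no adjustment is applied. Only planes narrower than
 * 8128 / (4 * 8) = 254 pixels would get the TLB miss bump here.
 */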
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}
static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}
static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state,
			       int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;
	int latency = dev_priv->wm.pri_latency[level] * 10;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = 4;
	else
		cpp = plane_state->base.fb->format->cpp[0];

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	if (plane->id == PLANE_CURSOR)
		width = plane_state->base.crtc_w;
	else
		width = drm_rect_width(&plane_state->base.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(int, wm, USHRT_MAX);
}
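
/*
 * Example of the final conversion above (illustrative numbers): a
 * method-1 result of 2970 bytes becomes DIV_ROUND_UP(2970, 64) + 2 =
 * 49 cachelines, which is the unit the DSPFW fields are programmed in.
 */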
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

out:
	if (dirty) {
		DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			      plane->base.name,
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}
static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_intel_plane_in_state(state, plane, plane_state, i) {
		const struct intel_plane_state *old_plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. If
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely.
	 */
	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		wm_state->fbc_en = false;
	else if (level >= G4X_WM_LEVEL_HPLL &&
		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		wm_state->fbc_en = false;

	return 0;
}
static int g4x_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
	const struct g4x_wm_state *active = &crtc->wm.active.g4x;
	enum plane_id plane_id;

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		WARN_ON(intermediate->wm.plane[plane_id] >
			g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		intermediate->cxsr);
	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		intermediate->hpll_en);

	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		intermediate->fbc_en && intermediate->cxsr);
	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		intermediate->fbc_en && intermediate->hpll_en);

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		crtc_state->wm.need_postvbl_update = true;

	return 0;
}
static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_crtcs++;
	}

	if (num_active_crtcs != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}
static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->base.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
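
/*
 * Example: active_planes = BIT(PLANE_SPRITE1) alone matches the
 * workaround condition, while sprite0+sprite1 or sprite0 alone do not.
 */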
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight32(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1];

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
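
/*
 * Worked example (illustrative numbers): with primary/sprite0 raw
 * watermarks of 300 and 100 and sprite1 disabled, total_rate = 400
 * and the 511-entry FIFO splits as 511 * 300 / 400 = 383 and
 * 511 * 100 / 400 = 127 entries, leaving one leftover entry to be
 * spread evenly by the loop above.
 */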
/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
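
/*
 * Example (illustrative numbers): with a 96-entry FIFO share, a raw
 * watermark of 40 is programmed as 96 - 40 = 56, while any raw value
 * exceeding the share maps to USHRT_MAX and marks the level unusable.
 */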
/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			      plane->base.name,
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}
static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_intel_plane_in_state(state, plane, plane_state, i) {
		const struct intel_plane_state *old_plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			to_intel_crtc_state(crtc->base.state);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
1937 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
1938 struct intel_crtc_state *crtc_state)
1940 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1941 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1942 const struct vlv_fifo_state *fifo_state =
1943 &crtc_state->wm.vlv.fifo_state;
1944 int sprite0_start, sprite1_start, fifo_size;
1946 if (!crtc_state->fifo_changed)
1949 sprite0_start = fifo_state->plane[PLANE_PRIMARY];
1950 sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
1951 fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
1953 WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
1954 WARN_ON(fifo_size != 511);
1956 trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
1959 * uncore.lock serves a double purpose here. It allows us to
1960 * use the less expensive I915_{READ,WRITE}_FW() functions, and
1961 * it protects the DSPARB registers from getting clobbered by
1962 * parallel updates from multiple pipes.
1964 * intel_pipe_update_start() has already disabled interrupts
1965 * for us, so a plain spin_lock() is sufficient here.
1967 spin_lock(&dev_priv->uncore.lock);
1969 switch (crtc->pipe) {
1970 uint32_t dsparb, dsparb2, dsparb3;
1972 dsparb = I915_READ_FW(DSPARB);
1973 dsparb2 = I915_READ_FW(DSPARB2);
1975 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
1976 VLV_FIFO(SPRITEB, 0xff));
1977 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
1978 VLV_FIFO(SPRITEB, sprite1_start));
1980 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
1981 VLV_FIFO(SPRITEB_HI, 0x1));
1982 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
1983 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
1985 I915_WRITE_FW(DSPARB, dsparb);
1986 I915_WRITE_FW(DSPARB2, dsparb2);
break;
case PIPE_B:
dsparb = I915_READ_FW(DSPARB);
1990 dsparb2 = I915_READ_FW(DSPARB2);
1992 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
1993 VLV_FIFO(SPRITED, 0xff));
1994 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
1995 VLV_FIFO(SPRITED, sprite1_start));
1997 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
1998 VLV_FIFO(SPRITED_HI, 0xff));
1999 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
2000 VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
2002 I915_WRITE_FW(DSPARB, dsparb);
2003 I915_WRITE_FW(DSPARB2, dsparb2);
break;
case PIPE_C:
dsparb3 = I915_READ_FW(DSPARB3);
2007 dsparb2 = I915_READ_FW(DSPARB2);
2009 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
2010 VLV_FIFO(SPRITEF, 0xff));
2011 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
2012 VLV_FIFO(SPRITEF, sprite1_start));
2014 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
2015 VLV_FIFO(SPRITEF_HI, 0xff));
2016 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
2017 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
2019 I915_WRITE_FW(DSPARB3, dsparb3);
2020 I915_WRITE_FW(DSPARB2, dsparb2);
break;
default:
break;
}

POSTING_READ_FW(DSPARB);
2028 spin_unlock(&dev_priv->uncore.lock);
2033 static int vlv_compute_intermediate_wm(struct drm_device *dev,
2034 struct intel_crtc *crtc,
2035 struct intel_crtc_state *crtc_state)
2037 struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
2038 const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
2039 const struct vlv_wm_state *active = &crtc->wm.active.vlv;
2042 intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2043 intermediate->cxsr = optimal->cxsr && active->cxsr &&
2044 !crtc_state->disable_cxsr;
2046 for (level = 0; level < intermediate->num_levels; level++) {
2047 enum plane_id plane_id;
2049 for_each_plane_id_on_crtc(crtc, plane_id) {
2050 intermediate->wm[level].plane[plane_id] =
2051 min(optimal->wm[level].plane[plane_id],
2052 active->wm[level].plane[plane_id]);
2055 intermediate->sr[level].plane = min(optimal->sr[level].plane,
2056 active->sr[level].plane);
2057 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2058 active->sr[level].cursor);
2061 vlv_invalidate_wms(crtc, intermediate, level);
2064 * If our intermediate WM are identical to the final WM, then we can
2065 * omit the post-vblank programming; only update if it's different.
2067 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2068 crtc_state->wm.need_postvbl_update = true;
2073 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2074 struct vlv_wm_values *wm)
2076 struct intel_crtc *crtc;
2077 int num_active_crtcs = 0;
2079 wm->level = dev_priv->wm.max_level;
2082 for_each_intel_crtc(&dev_priv->drm, crtc) {
2083 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2088 if (!wm_state->cxsr)
2092 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2095 if (num_active_crtcs != 1)
2098 if (num_active_crtcs > 1)
2099 wm->level = VLV_WM_LEVEL_PM2;
2101 for_each_intel_crtc(&dev_priv->drm, crtc) {
2102 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2103 enum pipe pipe = crtc->pipe;
2105 wm->pipe[pipe] = wm_state->wm[wm->level];
2106 if (crtc->active && wm->cxsr)
2107 wm->sr = wm_state->sr[wm->level];
2109 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2110 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2111 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2112 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2116 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2118 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2119 struct vlv_wm_values new_wm = {};
2121 vlv_merge_wm(dev_priv, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
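/*
 * Ordering note: the deeper power states (DDR DVFS, PM5, cxsr) are
 * dropped below before the new watermarks are written, and only
 * (re)enabled afterwards, so the hardware never sits in a state that
 * the currently programmed watermarks cannot support.
 */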
2126 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2127 chv_set_memory_dvfs(dev_priv, false);
2129 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2130 chv_set_memory_pm5(dev_priv, false);
2132 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2133 _intel_set_memory_cxsr(dev_priv, false);
2135 vlv_write_wm_values(dev_priv, &new_wm);
2137 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2138 _intel_set_memory_cxsr(dev_priv, true);
2140 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2141 chv_set_memory_pm5(dev_priv, true);
2143 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2144 chv_set_memory_dvfs(dev_priv, true);
2149 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2150 struct intel_crtc_state *crtc_state)
2152 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2153 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2155 mutex_lock(&dev_priv->wm.wm_mutex);
2156 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2157 vlv_program_watermarks(dev_priv);
2158 mutex_unlock(&dev_priv->wm.wm_mutex);
2161 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2162 struct intel_crtc_state *crtc_state)
2164 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2165 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2167 if (!crtc_state->wm.need_postvbl_update)
2170 mutex_lock(&dev_priv->wm.wm_mutex);
2171 intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2172 vlv_program_watermarks(dev_priv);
2173 mutex_unlock(&dev_priv->wm.wm_mutex);
2176 static void i965_update_wm(struct intel_crtc *unused_crtc)
2178 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2179 struct intel_crtc *crtc;
2184 /* Calc sr entries for one plane configs */
2185 crtc = single_enabled_crtc(dev_priv);
2187 /* self-refresh has much higher latency */
2188 static const int sr_latency_ns = 12000;
2189 const struct drm_display_mode *adjusted_mode =
2190 &crtc->config->base.adjusted_mode;
2191 const struct drm_framebuffer *fb =
2192 crtc->base.primary->state->fb;
2193 int clock = adjusted_mode->crtc_clock;
2194 int htotal = adjusted_mode->crtc_htotal;
2195 int hdisplay = crtc->config->pipe_src_w;
2196 int cpp = fb->format->cpp[0];
2199 entries = intel_wm_method2(clock, htotal,
2200 hdisplay, cpp, sr_latency_ns / 100);
2201 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2202 srwm = I965_FIFO_SIZE - entries;
2206 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
2209 entries = intel_wm_method2(clock, htotal,
2210 crtc->base.cursor->state->crtc_w, 4,
2211 sr_latency_ns / 100);
2212 entries = DIV_ROUND_UP(entries,
2213 i965_cursor_wm_info.cacheline_size) +
2214 i965_cursor_wm_info.guard_size;
2216 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2217 if (cursor_sr > i965_cursor_wm_info.max_wm)
2218 cursor_sr = i965_cursor_wm_info.max_wm;
2220 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
2221 "cursor %d\n", srwm, cursor_sr);
2223 cxsr_enabled = true;
2225 cxsr_enabled = false;
2226 /* Turn off self refresh if both pipes are enabled */
2227 intel_set_memory_cxsr(dev_priv, false);
2230 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2233 /* 965 has limitations... */
2234 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
2238 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
2239 FW_WM(8, PLANEC_OLD));
2240 /* update cursor SR watermark */
2241 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2244 intel_set_memory_cxsr(dev_priv, true);
2249 static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2251 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2252 const struct intel_watermark_params *wm_info;
2257 int planea_wm, planeb_wm;
2258 struct intel_crtc *crtc, *enabled = NULL;
2260 if (IS_I945GM(dev_priv))
2261 wm_info = &i945_wm_info;
2262 else if (!IS_GEN2(dev_priv))
2263 wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
2267 fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
2268 crtc = intel_get_crtc_for_plane(dev_priv, 0);
2269 if (intel_crtc_active(crtc)) {
2270 const struct drm_display_mode *adjusted_mode =
2271 &crtc->config->base.adjusted_mode;
2272 const struct drm_framebuffer *fb =
2273 crtc->base.primary->state->fb;
if (IS_GEN2(dev_priv))
cpp = 4;
else
cpp = fb->format->cpp[0];
2281 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2282 wm_info, fifo_size, cpp,
pessimal_latency_ns);
enabled = crtc;
} else {
2286 planea_wm = fifo_size - wm_info->guard_size;
2287 if (planea_wm > (long)wm_info->max_wm)
2288 planea_wm = wm_info->max_wm;
2291 if (IS_GEN2(dev_priv))
2292 wm_info = &i830_bc_wm_info;
2294 fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
2295 crtc = intel_get_crtc_for_plane(dev_priv, 1);
2296 if (intel_crtc_active(crtc)) {
2297 const struct drm_display_mode *adjusted_mode =
2298 &crtc->config->base.adjusted_mode;
2299 const struct drm_framebuffer *fb =
2300 crtc->base.primary->state->fb;
if (IS_GEN2(dev_priv))
cpp = 4;
else
cpp = fb->format->cpp[0];
2308 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2309 wm_info, fifo_size, cpp,
2310 pessimal_latency_ns);
if (enabled == NULL)
enabled = crtc;
else
enabled = NULL;
} else {
2316 planeb_wm = fifo_size - wm_info->guard_size;
2317 if (planeb_wm > (long)wm_info->max_wm)
2318 planeb_wm = wm_info->max_wm;
2321 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2323 if (IS_I915GM(dev_priv) && enabled) {
2324 struct drm_i915_gem_object *obj;
2326 obj = intel_fb_obj(enabled->base.primary->state->fb);
2328 /* self-refresh seems busted with untiled */
if (!i915_gem_object_is_tiled(obj))
enabled = NULL;
2334 * Overlay gets an aggressive default since video jitter is bad.
2338 /* Play safe and disable self-refresh before adjusting watermarks. */
2339 intel_set_memory_cxsr(dev_priv, false);
2341 /* Calc sr entries for one plane configs */
2342 if (HAS_FW_BLC(dev_priv) && enabled) {
2343 /* self-refresh has much higher latency */
2344 static const int sr_latency_ns = 6000;
2345 const struct drm_display_mode *adjusted_mode =
2346 &enabled->config->base.adjusted_mode;
2347 const struct drm_framebuffer *fb =
2348 enabled->base.primary->state->fb;
2349 int clock = adjusted_mode->crtc_clock;
2350 int htotal = adjusted_mode->crtc_htotal;
2351 int hdisplay = enabled->config->pipe_src_w;
if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
cpp = 4;
else
cpp = fb->format->cpp[0];
2360 entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2361 sr_latency_ns / 100);
2362 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2363 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
2364 srwm = wm_info->fifo_size - entries;
2368 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2369 I915_WRITE(FW_BLC_SELF,
2370 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2372 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2375 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2376 planea_wm, planeb_wm, cwm, srwm);
2378 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2379 fwater_hi = (cwm & 0x1f);
2381 /* Set request length to 8 cachelines per fetch */
2382 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2383 fwater_hi = fwater_hi | (1 << 8);
2385 I915_WRITE(FW_BLC, fwater_lo);
2386 I915_WRITE(FW_BLC2, fwater_hi);
2389 intel_set_memory_cxsr(dev_priv, true);
2392 static void i845_update_wm(struct intel_crtc *unused_crtc)
2394 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2395 struct intel_crtc *crtc;
2396 const struct drm_display_mode *adjusted_mode;
2400 crtc = single_enabled_crtc(dev_priv);
2404 adjusted_mode = &crtc->config->base.adjusted_mode;
2405 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2407 dev_priv->display.get_fifo_size(dev_priv, 0),
2408 4, pessimal_latency_ns);
2409 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2410 fwater_lo |= (3<<8) | planea_wm;
2412 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
2414 I915_WRITE(FW_BLC, fwater_lo);
2417 /* latency must be in 0.1us units. */
2418 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2420 unsigned int latency)
2424 ret = intel_wm_method1(pixel_rate, cpp, latency);
2425 ret = DIV_ROUND_UP(ret, 64) + 2;
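/*
 * Worked example (illustrative numbers, assuming intel_wm_method1()
 * returns pixel_rate * cpp * latency / 10000 bytes as elsewhere in
 * this file): a 148500 kHz pixel clock at 4 bytes per pixel with a
 * 12us latency (120 in 0.1us units) yields 7128 bytes, so the
 * method 1 watermark is DIV_ROUND_UP(7128, 64) + 2 = 114 FIFO blocks.
 */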
2430 /* latency must be in 0.1us units. */
2431 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2432 unsigned int htotal,
2435 unsigned int latency)
2439 ret = intel_wm_method2(pixel_rate, htotal,
2440 width, cpp, latency);
2441 ret = DIV_ROUND_UP(ret, 64) + 2;
2446 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2450 * Neither of these should be possible since this function shouldn't be
2451 * called if the CRTC is off or the plane is invisible. But let's be
2452 * extra paranoid to avoid a potential divide-by-zero if we screw up
2453 * elsewhere in the driver.
if (WARN_ON(!horiz_pixels))
return 2;
2460 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
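/*
 * Worked example (illustrative numbers): pri_val = 114 blocks on a
 * 1920 pixel wide, 4 byte per pixel plane gives
 * DIV_ROUND_UP(114 * 64, 1920 * 4) + 2 = 1 + 2 = 3 FBC cachelines.
 */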
2463 struct ilk_wm_maximums {
2471 * For both WM_PIPE and WM_LP.
2472 * mem_value must be in 0.1us units.
2474 static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2475 const struct intel_plane_state *pstate,
2479 uint32_t method1, method2;
2485 if (!intel_wm_plane_visible(cstate, pstate))
2488 cpp = pstate->base.fb->format->cpp[0];
2490 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2495 method2 = ilk_wm_method2(cstate->pixel_rate,
2496 cstate->base.adjusted_mode.crtc_htotal,
2497 drm_rect_width(&pstate->base.dst),
2500 return min(method1, method2);
2504 * For both WM_PIPE and WM_LP.
2505 * mem_value must be in 0.1us units.
2507 static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2508 const struct intel_plane_state *pstate,
2511 uint32_t method1, method2;
2517 if (!intel_wm_plane_visible(cstate, pstate))
2520 cpp = pstate->base.fb->format->cpp[0];
2522 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2523 method2 = ilk_wm_method2(cstate->pixel_rate,
2524 cstate->base.adjusted_mode.crtc_htotal,
2525 drm_rect_width(&pstate->base.dst),
2527 return min(method1, method2);
2531 * For both WM_PIPE and WM_LP.
2532 * mem_value must be in 0.1us units.
2534 static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2535 const struct intel_plane_state *pstate,
2543 if (!intel_wm_plane_visible(cstate, pstate))
2546 cpp = pstate->base.fb->format->cpp[0];
2548 return ilk_wm_method2(cstate->pixel_rate,
2549 cstate->base.adjusted_mode.crtc_htotal,
2550 pstate->base.crtc_w, cpp, mem_value);
2553 /* Only for WM_LP. */
2554 static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
2555 const struct intel_plane_state *pstate,
2560 if (!intel_wm_plane_visible(cstate, pstate))
2563 cpp = pstate->base.fb->format->cpp[0];
2565 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
2569 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8)
return 3072;
else if (INTEL_GEN(dev_priv) >= 7)
return 768;
else
return 512;
2580 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2581 int level, bool is_sprite)
2583 if (INTEL_GEN(dev_priv) >= 8)
2584 /* BDW primary/sprite plane watermarks */
2585 return level == 0 ? 255 : 2047;
2586 else if (INTEL_GEN(dev_priv) >= 7)
2587 /* IVB/HSW primary/sprite plane watermarks */
2588 return level == 0 ? 127 : 1023;
2589 else if (!is_sprite)
2590 /* ILK/SNB primary plane watermarks */
2591 return level == 0 ? 127 : 511;
2593 /* ILK/SNB sprite plane watermarks */
2594 return level == 0 ? 63 : 255;
2598 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2600 if (INTEL_GEN(dev_priv) >= 7)
2601 return level == 0 ? 63 : 255;
2603 return level == 0 ? 31 : 63;
2606 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8)
return 31;
else
return 15;
2614 /* Calculate the maximum primary/sprite plane watermark */
2615 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2617 const struct intel_wm_config *config,
2618 enum intel_ddb_partitioning ddb_partitioning,
2621 struct drm_i915_private *dev_priv = to_i915(dev);
2622 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2624 /* if sprites aren't enabled, sprites get nothing */
2625 if (is_sprite && !config->sprites_enabled)
2628 /* HSW allows LP1+ watermarks even with multiple pipes */
2629 if (level == 0 || config->num_pipes_active > 1) {
2630 fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
* For some reason the non-self-refresh
* FIFO size is only half of the
* self-refresh FIFO size on ILK/SNB.
2637 if (INTEL_GEN(dev_priv) <= 6)
2641 if (config->sprites_enabled) {
2642 /* level 0 is always calculated with 1:1 split */
2643 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2652 /* clamp to max that the registers can hold */
2653 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2656 /* Calculate the maximum cursor plane watermark */
2657 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2659 const struct intel_wm_config *config)
2661 /* HSW LP1+ watermarks w/ multiple pipes */
2662 if (level > 0 && config->num_pipes_active > 1)
2665 /* otherwise just report max that registers can hold */
2666 return ilk_cursor_wm_reg_max(to_i915(dev), level);
2669 static void ilk_compute_wm_maximums(const struct drm_device *dev,
2671 const struct intel_wm_config *config,
2672 enum intel_ddb_partitioning ddb_partitioning,
2673 struct ilk_wm_maximums *max)
2675 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2676 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2677 max->cur = ilk_cursor_wm_max(dev, level, config);
2678 max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
2681 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2683 struct ilk_wm_maximums *max)
2685 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2686 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2687 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2688 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2691 static bool ilk_validate_wm_level(int level,
2692 const struct ilk_wm_maximums *max,
2693 struct intel_wm_level *result)
2697 /* already determined to be invalid? */
2698 if (!result->enable)
2701 result->enable = result->pri_val <= max->pri &&
2702 result->spr_val <= max->spr &&
2703 result->cur_val <= max->cur;
2705 ret = result->enable;
2708 * HACK until we can pre-compute everything,
* and thus fail gracefully if LP0 watermarks
* are invalid.
2712 if (level == 0 && !result->enable) {
2713 if (result->pri_val > max->pri)
2714 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2715 level, result->pri_val, max->pri);
2716 if (result->spr_val > max->spr)
2717 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2718 level, result->spr_val, max->spr);
2719 if (result->cur_val > max->cur)
2720 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2721 level, result->cur_val, max->cur);
2723 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2724 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2725 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2726 result->enable = true;
2732 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2733 const struct intel_crtc *intel_crtc,
2735 struct intel_crtc_state *cstate,
2736 const struct intel_plane_state *pristate,
2737 const struct intel_plane_state *sprstate,
2738 const struct intel_plane_state *curstate,
2739 struct intel_wm_level *result)
2741 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2742 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2743 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2745 /* WM1+ latency values stored in 0.5us units */
2753 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2754 pri_latency, level);
2755 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2759 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2762 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2764 result->enable = true;
2768 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2770 const struct intel_atomic_state *intel_state =
2771 to_intel_atomic_state(cstate->base.state);
2772 const struct drm_display_mode *adjusted_mode =
2773 &cstate->base.adjusted_mode;
2774 u32 linetime, ips_linetime;
2776 if (!cstate->base.active)
2778 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2780 if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
/* The WM are computed based on how long it takes to fill a single
* row at the given clock rate, multiplied by 8.
2786 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2787 adjusted_mode->crtc_clock);
2788 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2789 intel_state->cdclk.logical.cdclk);
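/*
 * Worked example (illustrative mode): htotal = 2200 at a 148500 kHz
 * crtc clock gives linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8,
 * 148500) = 119, i.e. 14.875us per line in the register's 1/8us
 * units; ips_linetime is the same computation against the logical
 * cdclk.
 */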
2791 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2792 PIPE_WM_LINETIME_TIME(linetime);
2795 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2798 if (INTEL_GEN(dev_priv) >= 9) {
2801 int level, max_level = ilk_wm_max_level(dev_priv);
2803 /* read the first set of memory latencies[0:3] */
2804 val = 0; /* data0 to be programmed to 0 for first set */
2805 mutex_lock(&dev_priv->rps.hw_lock);
2806 ret = sandybridge_pcode_read(dev_priv,
2807 GEN9_PCODE_READ_MEM_LATENCY,
2809 mutex_unlock(&dev_priv->rps.hw_lock);
2812 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2816 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2817 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2818 GEN9_MEM_LATENCY_LEVEL_MASK;
2819 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2820 GEN9_MEM_LATENCY_LEVEL_MASK;
2821 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2822 GEN9_MEM_LATENCY_LEVEL_MASK;
2824 /* read the second set of memory latencies[4:7] */
2825 val = 1; /* data0 to be programmed to 1 for second set */
2826 mutex_lock(&dev_priv->rps.hw_lock);
2827 ret = sandybridge_pcode_read(dev_priv,
2828 GEN9_PCODE_READ_MEM_LATENCY,
2830 mutex_unlock(&dev_priv->rps.hw_lock);
2832 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2836 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2837 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2838 GEN9_MEM_LATENCY_LEVEL_MASK;
2839 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2840 GEN9_MEM_LATENCY_LEVEL_MASK;
2841 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2842 GEN9_MEM_LATENCY_LEVEL_MASK;
2845 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2846 * need to be disabled. We make sure to sanitize the values out
2847 * of the punit to satisfy this requirement.
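/*
 * E.g. raw latencies of {2, 4, 0, 8, ...} coming out of the punit are
 * sanitized to {2, 4, 0, 0, ...}, so no enabled level ever sits above
 * a zero-latency (unusable) one.
 */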
2849 for (level = 1; level <= max_level; level++) {
2850 if (wm[level] == 0) {
2851 for (i = level + 1; i <= max_level; i++)
2858 * WaWmMemoryReadLatency:skl+,glk
2860 * punit doesn't take into account the read latency so we need
2861 * to add 2us to the various latency levels we retrieve from the
* punit when level 0 response data is 0us.
2866 for (level = 1; level <= max_level; level++) {
2873 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2874 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2876 wm[0] = (sskpd >> 56) & 0xFF;
if (wm[0] == 0)
wm[0] = sskpd & 0xF;
2879 wm[1] = (sskpd >> 4) & 0xFF;
2880 wm[2] = (sskpd >> 12) & 0xFF;
2881 wm[3] = (sskpd >> 20) & 0x1FF;
2882 wm[4] = (sskpd >> 32) & 0x1FF;
2883 } else if (INTEL_GEN(dev_priv) >= 6) {
2884 uint32_t sskpd = I915_READ(MCH_SSKPD);
2886 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2887 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2888 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2889 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2890 } else if (INTEL_GEN(dev_priv) >= 5) {
2891 uint32_t mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
2895 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2896 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2898 MISSING_CASE(INTEL_DEVID(dev_priv));
2902 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2905 /* ILK sprite LP0 latency is 1300 ns */
if (IS_GEN5(dev_priv))
wm[0] = 13;
2910 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2913 /* ILK cursor LP0 latency is 1300 ns */
if (IS_GEN5(dev_priv))
wm[0] = 13;
2917 /* WaDoubleCursorLP3Latency:ivb */
if (IS_IVYBRIDGE(dev_priv))
wm[3] *= 2;
2922 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
/* how many WM levels are we expecting */
if (INTEL_GEN(dev_priv) >= 9)
return 7;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 4;
else if (INTEL_GEN(dev_priv) >= 6)
return 3;
else
return 2;
2935 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2937 const uint16_t wm[])
2939 int level, max_level = ilk_wm_max_level(dev_priv);
2941 for (level = 0; level <= max_level; level++) {
2942 unsigned int latency = wm[level];
2945 DRM_DEBUG_KMS("%s WM%d latency not provided\n",
2951 * - latencies are in us on gen9.
2952 * - before then, WM1+ latency values are in 0.5us units
2954 if (INTEL_GEN(dev_priv) >= 9)
2959 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2960 name, level, wm[level],
2961 latency / 10, latency % 10);
2965 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2966 uint16_t wm[5], uint16_t min)
2968 int level, max_level = ilk_wm_max_level(dev_priv);
2973 wm[0] = max(wm[0], min);
2974 for (level = 1; level <= max_level; level++)
2975 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2980 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
2985 * The BIOS provided WM memory latency values are often
2986 * inadequate for high resolution displays. Adjust them.
2988 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2989 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2990 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2995 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2996 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2997 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2998 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3001 static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3004 * On some SNB machines (Thinkpad X220 Tablet at least)
3005 * LP3 usage can cause vblank interrupts to be lost.
3006 * The DEIIR bit will go high but it looks like the CPU
3007 * never gets interrupted.
* It's not clear whether other interrupt sources could
3010 * be affected or if this is somehow limited to vblank
3011 * interrupts only. To play it safe we disable LP3
3012 * watermarks entirely.
3014 if (dev_priv->wm.pri_latency[3] == 0 &&
3015 dev_priv->wm.spr_latency[3] == 0 &&
3016 dev_priv->wm.cur_latency[3] == 0)
3019 dev_priv->wm.pri_latency[3] = 0;
3020 dev_priv->wm.spr_latency[3] = 0;
3021 dev_priv->wm.cur_latency[3] = 0;
3023 DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
3024 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3025 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3026 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3029 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3031 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3033 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3034 sizeof(dev_priv->wm.pri_latency));
3035 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3036 sizeof(dev_priv->wm.pri_latency));
3038 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3039 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3041 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3042 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3043 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3045 if (IS_GEN6(dev_priv)) {
3046 snb_wm_latency_quirk(dev_priv);
3047 snb_wm_lp3_irq_quirk(dev_priv);
3051 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3053 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3054 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3057 static bool ilk_validate_pipe_wm(struct drm_device *dev,
3058 struct intel_pipe_wm *pipe_wm)
3060 /* LP0 watermark maximums depend on this pipe alone */
3061 const struct intel_wm_config config = {
3062 .num_pipes_active = 1,
3063 .sprites_enabled = pipe_wm->sprites_enabled,
3064 .sprites_scaled = pipe_wm->sprites_scaled,
3066 struct ilk_wm_maximums max;
3068 /* LP0 watermarks always use 1/2 DDB partitioning */
3069 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
3071 /* At least LP0 must be valid */
3072 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3073 DRM_DEBUG_KMS("LP0 watermark invalid\n");
3080 /* Compute new watermarks for the pipe */
3081 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
3083 struct drm_atomic_state *state = cstate->base.state;
3084 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3085 struct intel_pipe_wm *pipe_wm;
3086 struct drm_device *dev = state->dev;
3087 const struct drm_i915_private *dev_priv = to_i915(dev);
3088 struct drm_plane *plane;
3089 const struct drm_plane_state *plane_state;
3090 const struct intel_plane_state *pristate = NULL;
3091 const struct intel_plane_state *sprstate = NULL;
3092 const struct intel_plane_state *curstate = NULL;
3093 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3094 struct ilk_wm_maximums max;
3096 pipe_wm = &cstate->wm.ilk.optimal;
3098 drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
3099 const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
3101 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
3103 else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
3105 else if (plane->type == DRM_PLANE_TYPE_CURSOR)
3109 pipe_wm->pipe_enabled = cstate->base.active;
3111 pipe_wm->sprites_enabled = sprstate->base.visible;
3112 pipe_wm->sprites_scaled = sprstate->base.visible &&
3113 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
3114 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
3117 usable_level = max_level;
3119 /* ILK/SNB: LP2+ watermarks only w/o sprites */
3120 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3123 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3124 if (pipe_wm->sprites_scaled)
3127 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3128 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
3129 pristate, sprstate, curstate, &pipe_wm->wm[0]);
3131 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3132 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
3134 if (!ilk_validate_pipe_wm(dev, pipe_wm))
3137 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3139 for (level = 1; level <= usable_level; level++) {
3140 struct intel_wm_level *wm = &pipe_wm->wm[level];
3142 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
3143 pristate, sprstate, curstate, wm);
3146 * Disable any watermark level that exceeds the
3147 * register maximums since such watermarks are
3150 if (!ilk_validate_wm_level(level, &max, wm)) {
3151 memset(wm, 0, sizeof(*wm));
3160 * Build a set of 'intermediate' watermark values that satisfy both the old
3161 * state and the new state. These can be programmed to the hardware
3164 static int ilk_compute_intermediate_wm(struct drm_device *dev,
3165 struct intel_crtc *intel_crtc,
3166 struct intel_crtc_state *newstate)
3168 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3169 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
3170 int level, max_level = ilk_wm_max_level(to_i915(dev));
3173 * Start with the final, target watermarks, then combine with the
3174 * currently active watermarks to get values that are safe both before
3175 * and after the vblank.
3177 *a = newstate->wm.ilk.optimal;
3178 a->pipe_enabled |= b->pipe_enabled;
3179 a->sprites_enabled |= b->sprites_enabled;
3180 a->sprites_scaled |= b->sprites_scaled;
3182 for (level = 0; level <= max_level; level++) {
3183 struct intel_wm_level *a_wm = &a->wm[level];
3184 const struct intel_wm_level *b_wm = &b->wm[level];
3186 a_wm->enable &= b_wm->enable;
3187 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3188 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3189 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3190 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3194 * We need to make sure that these merged watermark values are
3195 * actually a valid configuration themselves. If they're not,
3196 * there's no safe way to transition from the old state to
3197 * the new state, so we need to fail the atomic transaction.
3199 if (!ilk_validate_pipe_wm(dev, a))
3203 * If our intermediate WM are identical to the final WM, then we can
3204 * omit the post-vblank programming; only update if it's different.
3206 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3207 newstate->wm.need_postvbl_update = true;
3213 * Merge the watermarks from all active pipes for a specific level.
3215 static void ilk_merge_wm_level(struct drm_device *dev,
3217 struct intel_wm_level *ret_wm)
3219 const struct intel_crtc *intel_crtc;
3221 ret_wm->enable = true;
3223 for_each_intel_crtc(dev, intel_crtc) {
3224 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3225 const struct intel_wm_level *wm = &active->wm[level];
3227 if (!active->pipe_enabled)
3231 * The watermark values may have been used in the past,
3232 * so we must maintain them in the registers for some
3233 * time even if the level is now disabled.
3236 ret_wm->enable = false;
3238 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3239 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3240 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3241 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3246 * Merge all low power watermarks for all active pipes.
3248 static void ilk_wm_merge(struct drm_device *dev,
3249 const struct intel_wm_config *config,
3250 const struct ilk_wm_maximums *max,
3251 struct intel_pipe_wm *merged)
3253 struct drm_i915_private *dev_priv = to_i915(dev);
3254 int level, max_level = ilk_wm_max_level(dev_priv);
3255 int last_enabled_level = max_level;
3257 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3258 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3259 config->num_pipes_active > 1)
3260 last_enabled_level = 0;
3262 /* ILK: FBC WM must be disabled always */
3263 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
3265 /* merge each WM1+ level */
3266 for (level = 1; level <= max_level; level++) {
3267 struct intel_wm_level *wm = &merged->wm[level];
3269 ilk_merge_wm_level(dev, level, wm);
3271 if (level > last_enabled_level)
3273 else if (!ilk_validate_wm_level(level, max, wm))
3274 /* make sure all following levels get disabled */
3275 last_enabled_level = level - 1;
3278 * The spec says it is preferred to disable
3279 * FBC WMs instead of disabling a WM level.
3281 if (wm->fbc_val > max->fbc) {
3283 merged->fbc_wm_enabled = false;
3288 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3290 * FIXME this is racy. FBC might get enabled later.
3291 * What we should check here is whether FBC can be
3292 * enabled sometime later.
3294 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
3295 intel_fbc_is_active(dev_priv)) {
3296 for (level = 2; level <= max_level; level++) {
3297 struct intel_wm_level *wm = &merged->wm[level];
3304 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3306 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3307 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
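/*
 * I.e. with five levels available (HSW/BDW, wm[4] valid) LP1/LP2/LP3
 * map to levels 1/3/4, otherwise to levels 1/2/3.
 */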
3310 /* The value we need to program into the WM_LPx latency field */
3311 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
3313 struct drm_i915_private *dev_priv = to_i915(dev);
3315 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3318 return dev_priv->wm.pri_latency[level];
3321 static void ilk_compute_wm_results(struct drm_device *dev,
3322 const struct intel_pipe_wm *merged,
3323 enum intel_ddb_partitioning partitioning,
3324 struct ilk_wm_values *results)
3326 struct drm_i915_private *dev_priv = to_i915(dev);
3327 struct intel_crtc *intel_crtc;
3330 results->enable_fbc_wm = merged->fbc_wm_enabled;
3331 results->partitioning = partitioning;
3333 /* LP1+ register values */
3334 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3335 const struct intel_wm_level *r;
3337 level = ilk_wm_lp_to_level(wm_lp, merged);
3339 r = &merged->wm[level];
3342 * Maintain the watermark values even if the level is
3343 * disabled. Doing otherwise could cause underruns.
3345 results->wm_lp[wm_lp - 1] =
3346 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
3347 (r->pri_val << WM1_LP_SR_SHIFT) |
3351 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3353 if (INTEL_GEN(dev_priv) >= 8)
3354 results->wm_lp[wm_lp - 1] |=
3355 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3357 results->wm_lp[wm_lp - 1] |=
3358 r->fbc_val << WM1_LP_FBC_SHIFT;
3361 * Always set WM1S_LP_EN when spr_val != 0, even if the
3362 * level is disabled. Doing otherwise could cause underruns.
3364 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3365 WARN_ON(wm_lp != 1);
3366 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3368 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3371 /* LP0 register values */
3372 for_each_intel_crtc(dev, intel_crtc) {
3373 enum pipe pipe = intel_crtc->pipe;
3374 const struct intel_wm_level *r =
3375 &intel_crtc->wm.active.ilk.wm[0];
3377 if (WARN_ON(!r->enable))
3380 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
3382 results->wm_pipe[pipe] =
3383 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3384 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3389 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
3390 * case both are at the same level. Prefer r1 in case they're the same. */
3391 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
3392 struct intel_pipe_wm *r1,
3393 struct intel_pipe_wm *r2)
3395 int level, max_level = ilk_wm_max_level(to_i915(dev));
3396 int level1 = 0, level2 = 0;
3398 for (level = 1; level <= max_level; level++) {
3399 if (r1->wm[level].enable)
3401 if (r2->wm[level].enable)
3405 if (level1 == level2) {
3406 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3410 } else if (level1 > level2) {
3417 /* dirty bits used to track which watermarks need changes */
3418 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3419 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
3420 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3421 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3422 #define WM_DIRTY_FBC (1 << 24)
3423 #define WM_DIRTY_DDB (1 << 25)
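/*
 * For example, a change on pipe B alone sets
 * WM_DIRTY_PIPE(PIPE_B) == BIT(1) and WM_DIRTY_LINETIME(PIPE_B) ==
 * BIT(9), while marking the second LP level dirty sets
 * WM_DIRTY_LP(2) == BIT(17).
 */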
3425 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3426 const struct ilk_wm_values *old,
3427 const struct ilk_wm_values *new)
3429 unsigned int dirty = 0;
3433 for_each_pipe(dev_priv, pipe) {
3434 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
3435 dirty |= WM_DIRTY_LINETIME(pipe);
3436 /* Must disable LP1+ watermarks too */
3437 dirty |= WM_DIRTY_LP_ALL;
3440 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3441 dirty |= WM_DIRTY_PIPE(pipe);
3442 /* Must disable LP1+ watermarks too */
3443 dirty |= WM_DIRTY_LP_ALL;
3447 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3448 dirty |= WM_DIRTY_FBC;
3449 /* Must disable LP1+ watermarks too */
3450 dirty |= WM_DIRTY_LP_ALL;
3453 if (old->partitioning != new->partitioning) {
3454 dirty |= WM_DIRTY_DDB;
3455 /* Must disable LP1+ watermarks too */
3456 dirty |= WM_DIRTY_LP_ALL;
3459 /* LP1+ watermarks already deemed dirty, no need to continue */
3460 if (dirty & WM_DIRTY_LP_ALL)
3463 /* Find the lowest numbered LP1+ watermark in need of an update... */
3464 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3465 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3466 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3470 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3471 for (; wm_lp <= 3; wm_lp++)
3472 dirty |= WM_DIRTY_LP(wm_lp);
3477 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3480 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3481 bool changed = false;
3483 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3484 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3485 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
3488 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3489 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3490 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
3493 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3494 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3495 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3500 * Don't touch WM1S_LP_EN here.
3501 * Doing so could cause underruns.
3508 * The spec says we shouldn't write when we don't need, because every write
3509 * causes WMs to be re-evaluated, expending some power.
3511 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3512 struct ilk_wm_values *results)
3514 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3518 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3522 _ilk_disable_lp_wm(dev_priv, dirty);
3524 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3525 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3526 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3527 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3528 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3529 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3531 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
3532 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
3533 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
3534 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
3535 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
3536 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
3538 if (dirty & WM_DIRTY_DDB) {
3539 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3540 val = I915_READ(WM_MISC);
3541 if (results->partitioning == INTEL_DDB_PART_1_2)
3542 val &= ~WM_MISC_DATA_PARTITION_5_6;
3544 val |= WM_MISC_DATA_PARTITION_5_6;
3545 I915_WRITE(WM_MISC, val);
3547 val = I915_READ(DISP_ARB_CTL2);
3548 if (results->partitioning == INTEL_DDB_PART_1_2)
3549 val &= ~DISP_DATA_PARTITION_5_6;
3551 val |= DISP_DATA_PARTITION_5_6;
3552 I915_WRITE(DISP_ARB_CTL2, val);
3556 if (dirty & WM_DIRTY_FBC) {
3557 val = I915_READ(DISP_ARB_CTL);
3558 if (results->enable_fbc_wm)
3559 val &= ~DISP_FBC_WM_DIS;
3561 val |= DISP_FBC_WM_DIS;
3562 I915_WRITE(DISP_ARB_CTL, val);
3565 if (dirty & WM_DIRTY_LP(1) &&
3566 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3567 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3569 if (INTEL_GEN(dev_priv) >= 7) {
3570 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3571 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3572 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3573 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3576 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3577 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3578 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3579 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3580 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3581 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3583 dev_priv->wm.hw = *results;
3586 bool ilk_disable_lp_wm(struct drm_device *dev)
3588 struct drm_i915_private *dev_priv = to_i915(dev);
3590 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3594 * FIXME: We still don't have the proper code detect if we need to apply the WA,
3595 * so assume we'll always need it in order to avoid underruns.
3597 static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
3599 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
return true;

return false;
3608 intel_has_sagv(struct drm_i915_private *dev_priv)
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
IS_CANNONLAKE(dev_priv))
return true;

if (IS_SKYLAKE(dev_priv) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
return true;

return false;
3622 * SAGV dynamically adjusts the system agent voltage and clock frequencies
3623 * depending on power and performance requirements. The display engine access
3624 * to system memory is blocked during the adjustment time. Because of the
3625 * blocking time, having this enabled can cause full system hangs and/or pipe
3626 * underruns if we don't meet all of the following requirements:
3628 * - <= 1 pipe enabled
3629 * - All planes can enable watermarks for latencies >= SAGV engine block time
3630 * - We're not using an interlaced display configuration
3633 intel_enable_sagv(struct drm_i915_private *dev_priv)
3637 if (!intel_has_sagv(dev_priv))
3640 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3643 DRM_DEBUG_KMS("Enabling the SAGV\n");
3644 mutex_lock(&dev_priv->rps.hw_lock);
3646 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3649 /* We don't need to wait for the SAGV when enabling */
3650 mutex_unlock(&dev_priv->rps.hw_lock);
3653 * Some skl systems, pre-release machines in particular,
3654 * don't actually have an SAGV.
3656 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3657 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3658 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3660 } else if (ret < 0) {
3661 DRM_ERROR("Failed to enable the SAGV\n");
3665 dev_priv->sagv_status = I915_SAGV_ENABLED;
3670 intel_disable_sagv(struct drm_i915_private *dev_priv)
3674 if (!intel_has_sagv(dev_priv))
3677 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3680 DRM_DEBUG_KMS("Disabling the SAGV\n");
3681 mutex_lock(&dev_priv->rps.hw_lock);
3683 /* bspec says to keep retrying for at least 1 ms */
3684 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3686 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3688 mutex_unlock(&dev_priv->rps.hw_lock);
3691 * Some skl systems, pre-release machines in particular,
3692 * don't actually have an SAGV.
3694 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3695 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3696 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3698 } else if (ret < 0) {
3699 DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
3703 dev_priv->sagv_status = I915_SAGV_DISABLED;
3707 bool intel_can_enable_sagv(struct drm_atomic_state *state)
3709 struct drm_device *dev = state->dev;
3710 struct drm_i915_private *dev_priv = to_i915(dev);
3711 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3712 struct intel_crtc *crtc;
3713 struct intel_plane *plane;
3714 struct intel_crtc_state *cstate;
3717 int sagv_block_time_us = IS_GEN9(dev_priv) ? 30 : 20;
3719 if (!intel_has_sagv(dev_priv))
3723 * SKL+ workaround: bspec recommends we disable the SAGV when we have
* more than one pipe enabled.
3726 * If there are no active CRTCs, no additional checks need be performed
3728 if (hweight32(intel_state->active_crtcs) == 0)
3730 else if (hweight32(intel_state->active_crtcs) > 1)
3733 /* Since we're now guaranteed to only have one active CRTC... */
3734 pipe = ffs(intel_state->active_crtcs) - 1;
3735 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3736 cstate = to_intel_crtc_state(crtc->base.state);
3738 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3741 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3742 struct skl_plane_wm *wm =
3743 &cstate->wm.skl.optimal.planes[plane->id];
3745 /* Skip this plane if it's not enabled */
3746 if (!wm->wm[0].plane_en)
3749 /* Find the highest enabled wm level for this plane */
3750 for (level = ilk_wm_max_level(dev_priv);
3751 !wm->wm[level].plane_en; --level)
3754 latency = dev_priv->wm.skl_latency[level];
3756 if (skl_needs_memory_bw_wa(intel_state) &&
3757 plane->base.state->fb->modifier ==
I915_FORMAT_MOD_X_TILED)
latency += 15;
* If any of the planes on this pipe don't enable wm levels that
* incur memory latencies higher than sagv_block_time_us, we
* can't enable the SAGV.
3766 if (latency < sagv_block_time_us)
3774 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3775 const struct intel_crtc_state *cstate,
3776 struct skl_ddb_entry *alloc, /* out */
3777 int *num_active /* out */)
3779 struct drm_atomic_state *state = cstate->base.state;
3780 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3781 struct drm_i915_private *dev_priv = to_i915(dev);
3782 struct drm_crtc *for_crtc = cstate->base.crtc;
3783 unsigned int pipe_size, ddb_size;
3784 int nth_active_pipe;
3786 if (WARN_ON(!state) || !cstate->base.active) {
3789 *num_active = hweight32(dev_priv->active_crtcs);
3793 if (intel_state->active_pipe_changes)
3794 *num_active = hweight32(intel_state->active_crtcs);
3796 *num_active = hweight32(dev_priv->active_crtcs);
3798 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3799 WARN_ON(ddb_size == 0);
3801 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3804 * If the state doesn't change the active CRTC's, then there's
3805 * no need to recalculate; the existing pipe allocation limits
3806 * should remain unchanged. Note that we're safe from racing
3807 * commits since any racing commit that changes the active CRTC
3808 * list would need to grab _all_ crtc locks, including the one
3809 * we currently hold.
3811 if (!intel_state->active_pipe_changes) {
3813 * alloc may be cleared by clear_intel_crtc_state,
3814 * copy from old state to be sure
3816 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3820 nth_active_pipe = hweight32(intel_state->active_crtcs &
3821 (drm_crtc_mask(for_crtc) - 1));
3822 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
3823 alloc->start = nth_active_pipe * ddb_size / *num_active;
3824 alloc->end = alloc->start + pipe_size;
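/*
 * Worked example (sizes assumed for illustration): with an 896 block
 * DDB minus the 4 bypass blocks, ddb_size = 892; with two active
 * pipes pipe_size = 446, and the second active pipe
 * (nth_active_pipe == 1) is handed the range [446, 892).
 */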
3827 static unsigned int skl_cursor_allocation(int num_active)
if (num_active == 1)
return 32;

return 8;
3835 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3837 entry->start = reg & 0x3ff;
entry->end = (reg >> 16) & 0x3ff;
/* HW counts the end block as inclusive; software treats it as exclusive */
if (entry->end)
entry->end += 1;
3843 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3844 struct skl_ddb_allocation *ddb /* out */)
3846 struct intel_crtc *crtc;
3848 memset(ddb, 0, sizeof(*ddb));
3850 for_each_intel_crtc(&dev_priv->drm, crtc) {
3851 enum intel_display_power_domain power_domain;
3852 enum plane_id plane_id;
3853 enum pipe pipe = crtc->pipe;
3855 power_domain = POWER_DOMAIN_PIPE(pipe);
3856 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
3859 for_each_plane_id_on_crtc(crtc, plane_id) {
3862 if (plane_id != PLANE_CURSOR)
3863 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
3865 val = I915_READ(CUR_BUF_CFG(pipe));
3867 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
3870 intel_display_power_put(dev_priv, power_domain);
3875 * Determines the downscale amount of a plane for the purposes of watermark calculations.
3876 * The bspec defines downscale amount as:
3879 * Horizontal down scale amount = maximum[1, Horizontal source size /
3880 * Horizontal destination size]
3881 * Vertical down scale amount = maximum[1, Vertical source size /
3882 * Vertical destination size]
3883 * Total down scale amount = Horizontal down scale amount *
3884 * Vertical down scale amount
* Return value is provided in 16.16 fixed point form to retain the fractional part.
3888 * Caller should take care of dividing & rounding off the value.
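/*
 * Worked example: a 3840x2160 source rect scanned out at 1920x1080 is
 * a 2.0 downscale in each direction, i.e. a total downscale amount of
 * 4.0, returned as 0x40000 in 16.16 fixed point. Upscaling clamps to
 * 1.0 via the maximum[] terms above.
 */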
3890 static uint_fixed_16_16_t
3891 skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
3892 const struct intel_plane_state *pstate)
3894 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
3895 uint32_t src_w, src_h, dst_w, dst_h;
3896 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
3897 uint_fixed_16_16_t downscale_h, downscale_w;
3899 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
3900 return u32_to_fixed16(0);
3902 /* n.b., src is 16.16 fixed point, dst is whole integer */
3903 if (plane->id == PLANE_CURSOR) {
3905 * Cursors only support 0/180 degree rotation,
3906 * hence no need to account for rotation here.
3908 src_w = pstate->base.src_w >> 16;
3909 src_h = pstate->base.src_h >> 16;
3910 dst_w = pstate->base.crtc_w;
3911 dst_h = pstate->base.crtc_h;
3914 * Src coordinates are already rotated by 270 degrees for
3915 * the 90/270 degree plane rotation cases (to match the
3916 * GTT mapping), hence no need to account for rotation here.
3918 src_w = drm_rect_width(&pstate->base.src) >> 16;
3919 src_h = drm_rect_height(&pstate->base.src) >> 16;
3920 dst_w = drm_rect_width(&pstate->base.dst);
3921 dst_h = drm_rect_height(&pstate->base.dst);
3924 fp_w_ratio = div_fixed16(src_w, dst_w);
3925 fp_h_ratio = div_fixed16(src_h, dst_h);
3926 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
3927 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
3929 return mul_fixed16(downscale_w, downscale_h);
3932 static uint_fixed_16_16_t
3933 skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
3935 uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
3937 if (!crtc_state->base.enable)
3938 return pipe_downscale;
3940 if (crtc_state->pch_pfit.enabled) {
3941 uint32_t src_w, src_h, dst_w, dst_h;
3942 uint32_t pfit_size = crtc_state->pch_pfit.size;
3943 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
3944 uint_fixed_16_16_t downscale_h, downscale_w;
3946 src_w = crtc_state->pipe_src_w;
3947 src_h = crtc_state->pipe_src_h;
3948 dst_w = pfit_size >> 16;
3949 dst_h = pfit_size & 0xffff;
3951 if (!dst_w || !dst_h)
3952 return pipe_downscale;
3954 fp_w_ratio = div_fixed16(src_w, dst_w);
3955 fp_h_ratio = div_fixed16(src_h, dst_h);
3956 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
3957 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
3959 pipe_downscale = mul_fixed16(downscale_w, downscale_h);
3962 return pipe_downscale;
3965 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
3966 struct intel_crtc_state *cstate)
3968 struct drm_crtc_state *crtc_state = &cstate->base;
3969 struct drm_atomic_state *state = crtc_state->state;
3970 struct drm_plane *plane;
3971 const struct drm_plane_state *pstate;
3972 struct intel_plane_state *intel_pstate;
3973 int crtc_clock, dotclk;
3974 uint32_t pipe_max_pixel_rate;
3975 uint_fixed_16_16_t pipe_downscale;
3976 uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
3978 if (!cstate->base.enable)
3981 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
3982 uint_fixed_16_16_t plane_downscale;
3983 uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
3986 if (!intel_wm_plane_visible(cstate,
3987 to_intel_plane_state(pstate)))
3990 if (WARN_ON(!pstate->fb))
3993 intel_pstate = to_intel_plane_state(pstate);
3994 plane_downscale = skl_plane_downscale_amount(cstate,
3996 bpp = pstate->fb->format->cpp[0] * 8;
3998 plane_downscale = mul_fixed16(plane_downscale,
4001 max_downscale = max_fixed16(plane_downscale, max_downscale);
4003 pipe_downscale = skl_pipe_downscale_amount(cstate);
4005 pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
4007 crtc_clock = crtc_state->adjusted_mode.crtc_clock;
4008 dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
4010 if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev)))
4013 pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
4015 if (pipe_max_pixel_rate < crtc_clock) {
4016 DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
4024 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4025 const struct drm_plane_state *pstate,
4028 struct intel_plane *plane = to_intel_plane(pstate->plane);
4029 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
4031 uint32_t width = 0, height = 0;
4032 struct drm_framebuffer *fb;
4034 uint_fixed_16_16_t down_scale_amount;
4036 if (!intel_pstate->base.visible)
4040 format = fb->format->format;
4042 if (plane->id == PLANE_CURSOR)
4044 if (y && format != DRM_FORMAT_NV12)
4048 * Src coordinates are already rotated by 270 degrees for
4049 * the 90/270 degree plane rotation cases (to match the
4050 * GTT mapping), hence no need to account for rotation here.
4052 width = drm_rect_width(&intel_pstate->base.src) >> 16;
4053 height = drm_rect_height(&intel_pstate->base.src) >> 16;
4055 /* for planar format */
4056 if (format == DRM_FORMAT_NV12) {
4057 if (y) /* y-plane data rate */
4058 data_rate = width * height *
4060 else /* uv-plane data rate */
4061 data_rate = (width / 2) * (height / 2) *
4064 /* for packed formats */
4065 data_rate = width * height * fb->format->cpp[0];
4068 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
4070 return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4071 }
4073 /*
4074 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
4075 * an 8192x4096@32bpp framebuffer:
4076 * 3 * 4096 * 8192 * 4 < 2^32
4077 */
4078 static unsigned int
4079 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4080 unsigned *plane_data_rate,
4081 unsigned *plane_y_data_rate)
4083 struct drm_crtc_state *cstate = &intel_cstate->base;
4084 struct drm_atomic_state *state = cstate->state;
4085 struct drm_plane *plane;
4086 const struct drm_plane_state *pstate;
4087 unsigned int total_data_rate = 0;
4089 if (WARN_ON(!state))
4090 return 0;
4092 /* Calculate and cache data rate for each plane */
4093 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4094 enum plane_id plane_id = to_intel_plane(plane)->id;
4095 unsigned int rate;
4097 /* packed/uv */
4098 rate = skl_plane_relative_data_rate(intel_cstate,
4099 pstate, 0);
4100 plane_data_rate[plane_id] = rate;
4102 total_data_rate += rate;
4104 /* y-plane */
4105 rate = skl_plane_relative_data_rate(intel_cstate,
4106 pstate, 1);
4107 plane_y_data_rate[plane_id] = rate;
4109 total_data_rate += rate;
4110 }
4112 return total_data_rate;
4113 }
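/*
 * Editor's illustrative sketch (not part of the driver): for a packed
 * 4096x2160 XRGB8888 plane with no downscaling the relative data rate is
 * simply width * height * cpp = 4096 * 2160 * 4 = 35,389,440. Even three
 * such planes sum to well under 2^32, which is why total_data_rate above
 * fits in an unsigned int. example_packed_data_rate() is hypothetical.
 */
static inline unsigned int example_packed_data_rate(void)
{
	uint32_t width = 4096, height = 2160, cpp = 4;

	/* downscale amount 1.0 == no plane scaling */
	return mul_round_up_u32_fixed16(width * height * cpp,
					u32_to_fixed16(1));
}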
4115 static uint16_t
4116 skl_ddb_min_alloc(const struct drm_plane_state *pstate,
4117 int y)
4119 struct drm_framebuffer *fb = pstate->fb;
4120 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
4121 uint32_t src_w, src_h;
4122 uint32_t min_scanlines = 8;
4123 int plane_bpp;
4128 /* For packed formats, no y-plane, return 0 */
4129 if (y && fb->format->format != DRM_FORMAT_NV12)
4130 return 0;
4132 /* For non-Y-tiled formats, return 8 blocks */
4133 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
4134 fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
4135 fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
4136 fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
4137 return 8;
4140 * Src coordinates are already rotated by 270 degrees for
4141 * the 90/270 degree plane rotation cases (to match the
4142 * GTT mapping), hence no need to account for rotation here.
4144 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
4145 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
4147 /* Halve UV plane width and height for NV12 */
4148 if (fb->format->format == DRM_FORMAT_NV12 && !y) {
4149 src_w /= 2;
4150 src_h /= 2;
4151 }
4153 if (fb->format->format == DRM_FORMAT_NV12 && !y)
4154 plane_bpp = fb->format->cpp[1];
4155 else
4156 plane_bpp = fb->format->cpp[0];
4158 if (drm_rotation_90_or_270(pstate->rotation)) {
4159 switch (plane_bpp) {
4160 case 1:
4161 min_scanlines = 32;
4162 break;
4163 case 2:
4164 min_scanlines = 16;
4165 break;
4166 case 4:
4167 min_scanlines = 8;
4168 break;
4169 case 8:
4170 min_scanlines = 4;
4171 break;
4172 default:
4173 WARN(1, "Unsupported pixel depth %u for rotation",
4174 plane_bpp);
4175 min_scanlines = 32;
4176 }
4177 }
4179 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
4180 }
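/*
 * Editor's illustrative sketch (not part of the driver): the minimum DDB
 * allocation formula above for an unrotated 3840-wide 32bpp Y-tiled plane
 * (min_scanlines = 8):
 *   DIV_ROUND_UP(4 * 3840 * 4, 512) * 8 / 4 + 3 = 120 * 2 + 3 = 243 blocks.
 * example_min_alloc_y_tiled() is a hypothetical helper.
 */
static inline uint32_t example_min_alloc_y_tiled(void)
{
	uint32_t src_w = 3840, plane_bpp = 4, min_scanlines = 8;

	return DIV_ROUND_UP(4 * src_w * plane_bpp, 512) * min_scanlines / 4 + 3;
}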
4183 skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
4184 uint16_t *minimum, uint16_t *y_minimum)
4186 const struct drm_plane_state *pstate;
4187 struct drm_plane *plane;
4189 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
4190 enum plane_id plane_id = to_intel_plane(plane)->id;
4192 if (plane_id == PLANE_CURSOR)
4193 continue;
4195 if (!pstate->visible)
4196 continue;
4198 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
4199 y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4202 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
4205 static int
4206 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4207 struct skl_ddb_allocation *ddb /* out */)
4209 struct drm_atomic_state *state = cstate->base.state;
4210 struct drm_crtc *crtc = cstate->base.crtc;
4211 struct drm_device *dev = crtc->dev;
4212 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4213 enum pipe pipe = intel_crtc->pipe;
4214 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
4215 uint16_t alloc_size, start;
4216 uint16_t minimum[I915_MAX_PLANES] = {};
4217 uint16_t y_minimum[I915_MAX_PLANES] = {};
4218 unsigned int total_data_rate;
4219 enum plane_id plane_id;
4220 int num_active;
4221 unsigned plane_data_rate[I915_MAX_PLANES] = {};
4222 unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
4223 uint16_t total_min_blocks = 0;
4225 /* Clear the partitioning for disabled planes. */
4226 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
4227 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
4229 if (WARN_ON(!state))
4230 return 0;
4232 if (!cstate->base.active) {
4233 alloc->start = alloc->end = 0;
4234 return 0;
4235 }
4237 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
4238 alloc_size = skl_ddb_entry_size(alloc);
4239 if (alloc_size == 0)
4240 return 0;
4242 skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
4244 /*
4245 * 1. Allocate the minimum required blocks for each active plane
4246 * and allocate the cursor; it doesn't require extra allocation
4247 * proportional to the data rate.
4248 */
4250 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4251 total_min_blocks += minimum[plane_id];
4252 total_min_blocks += y_minimum[plane_id];
4255 if (total_min_blocks > alloc_size) {
4256 DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
4257 DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
4258 alloc_size);
4259 return -EINVAL;
4260 }
4262 alloc_size -= total_min_blocks;
4263 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
4264 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
4266 /*
4267 * 2. Distribute the remaining space in proportion to the amount of
4268 * data each plane needs to fetch from memory.
4269 *
4270 * FIXME: we may not allocate every single block here.
4271 */
4272 total_data_rate = skl_get_total_relative_data_rate(cstate,
4273 plane_data_rate,
4274 plane_y_data_rate);
4275 if (total_data_rate == 0)
4276 return 0;
4278 start = alloc->start;
4279 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4280 unsigned int data_rate, y_data_rate;
4281 uint16_t plane_blocks, y_plane_blocks = 0;
4283 if (plane_id == PLANE_CURSOR)
4286 data_rate = plane_data_rate[plane_id];
4288 /*
4289 * allocation for (packed formats) or (uv-plane part of planar format):
4290 * promote the expression to 64 bits to avoid overflowing, the
4291 * result is < available as data_rate / total_data_rate < 1
4292 */
4293 plane_blocks = minimum[plane_id];
4294 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
4295 total_data_rate);
4297 /* Leave disabled planes at (0,0) */
4298 if (data_rate) {
4299 ddb->plane[pipe][plane_id].start = start;
4300 ddb->plane[pipe][plane_id].end = start + plane_blocks;
4301 }
4303 start += plane_blocks;
4305 /*
4306 * allocation for y_plane part of planar format:
4307 */
4308 y_data_rate = plane_y_data_rate[plane_id];
4310 y_plane_blocks = y_minimum[plane_id];
4311 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
4312 total_data_rate);
4314 if (y_data_rate) {
4315 ddb->y_plane[pipe][plane_id].start = start;
4316 ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
4317 }
4319 start += y_plane_blocks;
4320 }
4322 return 0;
4323 }
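/*
 * Editor's illustrative sketch (not part of the driver): the proportional
 * split above in concrete numbers. With alloc_size = 800 blocks remaining
 * after the fixed minimums, a plane contributing 1/4 of the total data
 * rate receives minimum + 800 / 4 = minimum + 200 blocks. The 64-bit
 * promotion mirrors the div_u64() use above; example_proportional_share()
 * is a hypothetical helper.
 */
static inline uint16_t example_proportional_share(uint16_t minimum,
						  uint16_t alloc_size,
						  unsigned int data_rate,
						  unsigned int total_data_rate)
{
	return minimum + div_u64((uint64_t)alloc_size * data_rate,
				 total_data_rate);
}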
4325 /*
4326 * The max latency should be 257 us (the max the punit can encode is 255
4327 * and we add 2 us for the read latency), and cpp should always be <= 8,
4328 * so that allows a pixel_rate up to ~2 GHz, which is sufficient since the
4329 * max 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
4330 */
4331 static uint_fixed_16_16_t
4332 skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
4333 uint8_t cpp, uint32_t latency)
4335 uint32_t wm_intermediate_val;
4336 uint_fixed_16_16_t ret;
4338 if (latency == 0)
4339 return FP_16_16_MAX;
4341 wm_intermediate_val = latency * pixel_rate * cpp;
4342 ret = div_fixed16(wm_intermediate_val, 1000 * 512);
4344 if (INTEL_GEN(dev_priv) >= 10)
4345 ret = add_fixed16_u32(ret, 1);
4347 return ret;
4348 }
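/*
 * Editor's illustrative sketch (not part of the driver): method 1 with
 * concrete numbers. For pixel_rate = 148500 (kHz), cpp = 4 and a 15 us
 * latency, 15 * 148500 * 4 / (1000 * 512) ~= 17.4 blocks. The assumed
 * input values and the name example_method1() are hypothetical.
 */
static inline uint_fixed_16_16_t example_method1(void)
{
	return div_fixed16(15 * 148500 * 4, 1000 * 512); /* ~17.4 */
}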
4350 static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
4351 uint32_t pipe_htotal,
4353 uint_fixed_16_16_t plane_blocks_per_line)
4355 uint32_t wm_intermediate_val;
4356 uint_fixed_16_16_t ret;
4358 if (latency == 0)
4359 return FP_16_16_MAX;
4361 wm_intermediate_val = latency * pixel_rate;
4362 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
4363 pipe_htotal * 1000);
4364 ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
4366 return ret;
4367 }
4368 static uint_fixed_16_16_t
4369 intel_get_linetime_us(struct intel_crtc_state *cstate)
4371 uint32_t pixel_rate;
4372 uint32_t crtc_htotal;
4373 uint_fixed_16_16_t linetime_us;
4375 if (!cstate->base.active)
4376 return u32_to_fixed16(0);
4378 pixel_rate = cstate->pixel_rate;
4380 if (WARN_ON(pixel_rate == 0))
4381 return u32_to_fixed16(0);
4383 crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
4384 linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
4386 return linetime_us;
4387 }
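/*
 * Editor's illustrative sketch (not part of the driver): for a typical
 * 1920x1080@60 mode with crtc_htotal = 2200 and pixel_rate = 148500 (kHz)
 * the division above yields 2200 * 1000 / 148500 ~= 14.8 us per line.
 * example_linetime_us() is a hypothetical helper.
 */
static inline uint_fixed_16_16_t example_linetime_us(void)
{
	return div_fixed16(2200 * 1000, 148500); /* ~14.8 us */
}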
4389 static uint32_t
4390 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
4391 const struct intel_plane_state *pstate)
4393 uint64_t adjusted_pixel_rate;
4394 uint_fixed_16_16_t downscale_amount;
4396 /* Shouldn't reach here on disabled planes... */
4397 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
4398 return 0;
4401 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
4402 * with additional adjustments for plane-specific scaling.
4404 adjusted_pixel_rate = cstate->pixel_rate;
4405 downscale_amount = skl_plane_downscale_amount(cstate, pstate);
4407 return mul_round_up_u32_fixed16(adjusted_pixel_rate,
4408 downscale_amount);
4409 }
4411 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4412 struct intel_crtc_state *cstate,
4413 const struct intel_plane_state *intel_pstate,
4414 uint16_t ddb_allocation,
4415 int level,
4416 uint16_t *out_blocks, /* out */
4417 uint8_t *out_lines, /* out */
4418 bool *enabled /* out */)
4420 struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
4421 const struct drm_plane_state *pstate = &intel_pstate->base;
4422 const struct drm_framebuffer *fb = pstate->fb;
4423 uint32_t latency = dev_priv->wm.skl_latency[level];
4424 uint_fixed_16_16_t method1, method2;
4425 uint_fixed_16_16_t plane_blocks_per_line;
4426 uint_fixed_16_16_t selected_result;
4427 uint32_t interm_pbpl;
4428 uint32_t plane_bytes_per_line;
4429 uint32_t res_blocks, res_lines;
4430 uint8_t cpp;
4431 uint32_t width = 0;
4432 uint32_t plane_pixel_rate;
4433 uint_fixed_16_16_t y_tile_minimum;
4434 uint32_t y_min_scanlines;
4435 struct intel_atomic_state *state =
4436 to_intel_atomic_state(cstate->base.state);
4437 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
4438 bool y_tiled, x_tiled;
4440 if (latency == 0 ||
4441 !intel_wm_plane_visible(cstate, intel_pstate)) {
4442 *enabled = false;
4443 return 0;
4444 }
4446 y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
4447 fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
4448 fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4449 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4450 x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
4452 /* Display WA #1141: kbl,cfl */
4453 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
4454 dev_priv->ipc_enabled)
4455 latency += 4;
4457 if (apply_memory_bw_wa && x_tiled)
4458 latency += 15;
4460 if (plane->id == PLANE_CURSOR) {
4461 width = intel_pstate->base.crtc_w;
4462 } else {
4464 * Src coordinates are already rotated by 270 degrees for
4465 * the 90/270 degree plane rotation cases (to match the
4466 * GTT mapping), hence no need to account for rotation here.
4468 width = drm_rect_width(&intel_pstate->base.src) >> 16;
4469 }
4471 cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
4472 fb->format->cpp[0];
4473 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
4475 if (drm_rotation_90_or_270(pstate->rotation)) {
4477 switch (cpp) {
4478 case 1:
4479 y_min_scanlines = 16;
4480 break;
4481 case 2:
4482 y_min_scanlines = 8;
4483 break;
4484 case 4:
4485 y_min_scanlines = 4;
4486 break;
4487 default:
4488 MISSING_CASE(cpp);
4489 return -EINVAL;
4490 }
4491 } else {
4492 y_min_scanlines = 4;
4493 }
4495 if (apply_memory_bw_wa)
4496 y_min_scanlines *= 2;
4498 plane_bytes_per_line = width * cpp;
4499 if (y_tiled) {
4500 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
4501 y_min_scanlines, 512);
4503 if (INTEL_GEN(dev_priv) >= 10)
4504 interm_pbpl++;
4506 plane_blocks_per_line = div_fixed16(interm_pbpl,
4507 y_min_scanlines);
4508 } else if (x_tiled && INTEL_GEN(dev_priv) == 9) {
4509 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
4510 plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4512 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
4513 plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4514 }
4516 method1 = skl_wm_method1(dev_priv, plane_pixel_rate, cpp, latency);
4517 method2 = skl_wm_method2(plane_pixel_rate,
4518 cstate->base.adjusted_mode.crtc_htotal,
4519 latency,
4520 plane_blocks_per_line);
4522 y_tile_minimum = mul_u32_fixed16(y_min_scanlines,
4523 plane_blocks_per_line);
4525 if (y_tiled) {
4526 selected_result = max_fixed16(method2, y_tile_minimum);
4527 } else {
4528 uint32_t linetime_us;
4530 linetime_us = fixed16_to_u32_round_up(
4531 intel_get_linetime_us(cstate));
4532 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
4533 (plane_bytes_per_line / 512 < 1))
4534 selected_result = method2;
4535 else if (ddb_allocation >=
4536 fixed16_to_u32_round_up(plane_blocks_per_line))
4537 selected_result = min_fixed16(method1, method2);
4538 else if (latency >= linetime_us)
4539 selected_result = min_fixed16(method1, method2);
4540 else
4541 selected_result = method1;
4542 }
4544 res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
4545 res_lines = div_round_up_fixed16(selected_result,
4546 plane_blocks_per_line);
4548 /* Display WA #1125: skl,bxt,kbl,glk */
4549 if (level == 0 &&
4550 (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4551 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
4552 res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
4554 /* Display WA #1126: skl,bxt,kbl,glk */
4555 if (level >= 1 && level <= 7) {
4556 if (y_tiled) {
4557 res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
4558 res_lines += y_min_scanlines;
4559 } else {
4560 res_blocks++;
4561 }
4562 }
4564 if (res_blocks >= ddb_allocation || res_lines > 31) {
4565 *enabled = false;
4567 /*
4568 * If there are no valid level 0 watermarks, then we can't
4569 * support this display configuration.
4570 */
4571 if (level) {
4572 return 0;
4573 } else {
4574 struct drm_plane *plane = pstate->plane;
4576 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
4577 DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
4578 plane->base.id, plane->name,
4579 res_blocks, ddb_allocation, res_lines);
4580 return -EINVAL;
4581 }
4582 }
4584 *out_blocks = res_blocks;
4585 *out_lines = res_lines;
4586 *enabled = true;
4588 return 0;
4589 }
4591 static int
4592 skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4593 struct skl_ddb_allocation *ddb,
4594 struct intel_crtc_state *cstate,
4595 const struct intel_plane_state *intel_pstate,
4596 struct skl_plane_wm *wm)
4598 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4599 struct drm_plane *plane = intel_pstate->base.plane;
4600 struct intel_plane *intel_plane = to_intel_plane(plane);
4601 uint16_t ddb_blocks;
4602 enum pipe pipe = intel_crtc->pipe;
4603 int level, max_level = ilk_wm_max_level(dev_priv);
4604 int ret;
4606 if (WARN_ON(!intel_pstate->base.fb))
4607 return -EINVAL;
4609 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
4611 for (level = 0; level <= max_level; level++) {
4612 struct skl_wm_level *result = &wm->wm[level];
4614 ret = skl_compute_plane_wm(dev_priv,
4615 cstate,
4616 intel_pstate,
4617 ddb_blocks,
4618 level,
4619 &result->plane_res_b,
4620 &result->plane_res_l,
4621 &result->plane_en);
4622 if (ret)
4623 return ret;
4624 }
4626 return 0;
4627 }
4629 static uint32_t
4630 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
4632 struct drm_atomic_state *state = cstate->base.state;
4633 struct drm_i915_private *dev_priv = to_i915(state->dev);
4634 uint_fixed_16_16_t linetime_us;
4635 uint32_t linetime_wm;
4637 linetime_us = intel_get_linetime_us(cstate);
4639 if (is_fixed16_zero(linetime_us))
4640 return 0;
4642 linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
4644 /* Display WA #1135: bxt. */
4645 if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
4646 linetime_wm = DIV_ROUND_UP(linetime_wm, 2);
4648 return linetime_wm;
4649 }
4651 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4652 struct skl_wm_level *trans_wm /* out */)
4654 if (!cstate->base.active)
4655 return;
4657 /* Until we know more, just disable transition WMs */
4658 trans_wm->plane_en = false;
4661 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
4662 struct skl_ddb_allocation *ddb,
4663 struct skl_pipe_wm *pipe_wm)
4665 struct drm_device *dev = cstate->base.crtc->dev;
4666 struct drm_crtc_state *crtc_state = &cstate->base;
4667 const struct drm_i915_private *dev_priv = to_i915(dev);
4668 struct drm_plane *plane;
4669 const struct drm_plane_state *pstate;
4670 struct skl_plane_wm *wm;
4671 int ret;
4674 * We'll only calculate watermarks for planes that are actually
4675 * enabled, so make sure all other planes are set as disabled.
4677 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
4679 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
4680 const struct intel_plane_state *intel_pstate =
4681 to_intel_plane_state(pstate);
4682 enum plane_id plane_id = to_intel_plane(plane)->id;
4684 wm = &pipe_wm->planes[plane_id];
4686 ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
4687 intel_pstate, wm);
4688 if (ret)
4689 return ret;
4690 skl_compute_transition_wm(cstate, &wm->trans_wm);
4691 }
4692 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
4694 return 0;
4695 }
4697 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
4698 i915_reg_t reg,
4699 const struct skl_ddb_entry *entry)
4701 if (entry->end)
4702 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
4703 else
4704 I915_WRITE(reg, 0);
4705 }
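/*
 * Editor's illustrative sketch (not part of the driver): the register
 * encoding above packs the inclusive end block into the high half-word
 * and the start block into the low one, so the DDB entry [0, 160) is
 * written as (160 - 1) << 16 | 0 = 0x009f0000. example_ddb_reg_val() is
 * a hypothetical helper.
 */
static inline uint32_t example_ddb_reg_val(const struct skl_ddb_entry *entry)
{
	return entry->end ? (entry->end - 1) << 16 | entry->start : 0;
}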
4707 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
4708 i915_reg_t reg,
4709 const struct skl_wm_level *level)
4711 uint32_t val = 0;
4713 if (level->plane_en) {
4714 val |= PLANE_WM_EN;
4715 val |= level->plane_res_b;
4716 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
4717 }
4719 I915_WRITE(reg, val);
4720 }
4722 static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
4723 const struct skl_plane_wm *wm,
4724 const struct skl_ddb_allocation *ddb,
4725 enum plane_id plane_id)
4727 struct drm_crtc *crtc = &intel_crtc->base;
4728 struct drm_device *dev = crtc->dev;
4729 struct drm_i915_private *dev_priv = to_i915(dev);
4730 int level, max_level = ilk_wm_max_level(dev_priv);
4731 enum pipe pipe = intel_crtc->pipe;
4733 for (level = 0; level <= max_level; level++) {
4734 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
4735 &wm->wm[level]);
4736 }
4737 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
4738 &wm->trans_wm);
4740 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
4741 &ddb->plane[pipe][plane_id]);
4742 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
4743 &ddb->y_plane[pipe][plane_id]);
4746 static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
4747 const struct skl_plane_wm *wm,
4748 const struct skl_ddb_allocation *ddb)
4750 struct drm_crtc *crtc = &intel_crtc->base;
4751 struct drm_device *dev = crtc->dev;
4752 struct drm_i915_private *dev_priv = to_i915(dev);
4753 int level, max_level = ilk_wm_max_level(dev_priv);
4754 enum pipe pipe = intel_crtc->pipe;
4756 for (level = 0; level <= max_level; level++) {
4757 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
4760 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
4762 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
4763 &ddb->plane[pipe][PLANE_CURSOR]);
4766 bool skl_wm_level_equals(const struct skl_wm_level *l1,
4767 const struct skl_wm_level *l2)
4769 if (l1->plane_en != l2->plane_en)
4770 return false;
4772 /* If both planes aren't enabled, the rest shouldn't matter */
4773 if (!l1->plane_en)
4774 return true;
4776 return (l1->plane_res_l == l2->plane_res_l &&
4777 l1->plane_res_b == l2->plane_res_b);
4778 }
4780 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
4781 const struct skl_ddb_entry *b)
4783 return a->start < b->end && b->start < a->end;
4784 }
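/*
 * Editor's illustrative sketch (not part of the driver): DDB entries are
 * half-open [start, end) ranges, so [0, 160) and [160, 320) do not
 * overlap while [0, 160) and [100, 200) do. example_entries_overlap() is
 * a hypothetical helper exercising the test above.
 */
static inline bool example_entries_overlap(void)
{
	struct skl_ddb_entry a = { .start = 0, .end = 160 };
	struct skl_ddb_entry b = { .start = 100, .end = 200 };

	return skl_ddb_entries_overlap(&a, &b); /* true */
}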
4786 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
4787 const struct skl_ddb_entry *ddb,
4788 int ignore)
4790 int i;
4792 for (i = 0; i < I915_MAX_PIPES; i++)
4793 if (i != ignore && entries[i] &&
4794 skl_ddb_entries_overlap(ddb, entries[i]))
4795 return true;
4797 return false;
4798 }
4800 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
4801 const struct skl_pipe_wm *old_pipe_wm,
4802 struct skl_pipe_wm *pipe_wm, /* out */
4803 struct skl_ddb_allocation *ddb, /* out */
4804 bool *changed /* out */)
4806 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
4807 int ret;
4809 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
4810 if (ret)
4811 return ret;
4813 if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
4814 *changed = false;
4815 else
4816 *changed = true;
4818 return 0;
4819 }
4821 static uint32_t
4822 pipes_modified(struct drm_atomic_state *state)
4824 struct drm_crtc *crtc;
4825 struct drm_crtc_state *cstate;
4826 uint32_t i, ret = 0;
4828 for_each_new_crtc_in_state(state, crtc, cstate, i)
4829 ret |= drm_crtc_mask(crtc);
4831 return ret;
4832 }
4834 static int
4835 skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
4837 struct drm_atomic_state *state = cstate->base.state;
4838 struct drm_device *dev = state->dev;
4839 struct drm_crtc *crtc = cstate->base.crtc;
4840 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4841 struct drm_i915_private *dev_priv = to_i915(dev);
4842 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4843 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
4844 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
4845 struct drm_plane_state *plane_state;
4846 struct drm_plane *plane;
4847 enum pipe pipe = intel_crtc->pipe;
4849 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
4851 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
4852 enum plane_id plane_id = to_intel_plane(plane)->id;
4854 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
4855 &new_ddb->plane[pipe][plane_id]) &&
4856 skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
4857 &new_ddb->y_plane[pipe][plane_id]))
4858 continue;
4860 plane_state = drm_atomic_get_plane_state(state, plane);
4861 if (IS_ERR(plane_state))
4862 return PTR_ERR(plane_state);
4863 }
4865 return 0;
4866 }
4868 static int
4869 skl_compute_ddb(struct drm_atomic_state *state)
4871 struct drm_device *dev = state->dev;
4872 struct drm_i915_private *dev_priv = to_i915(dev);
4873 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4874 struct intel_crtc *intel_crtc;
4875 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
4876 uint32_t realloc_pipes = pipes_modified(state);
4880 * If this is our first atomic update following hardware readout,
4881 * we can't trust the DDB that the BIOS programmed for us. Let's
4882 * pretend that all pipes switched active status so that we'll
4883 * ensure a full DDB recompute.
4885 if (dev_priv->wm.distrust_bios_wm) {
4886 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4887 state->acquire_ctx);
4888 if (ret)
4889 return ret;
4891 intel_state->active_pipe_changes = ~0;
4893 /*
4894 * We usually only initialize intel_state->active_crtcs if we're
4895 * doing a modeset; make sure this field is always
4896 * initialized during the sanitization process that happens
4897 * on the first commit too.
4898 */
4899 if (!intel_state->modeset)
4900 intel_state->active_crtcs = dev_priv->active_crtcs;
4904 * If the modeset changes which CRTC's are active, we need to
4905 * recompute the DDB allocation for *all* active pipes, even
4906 * those that weren't otherwise being modified in any way by this
4907 * atomic commit. Due to the shrinking of the per-pipe allocations
4908 * when new active CRTC's are added, it's possible for a pipe that
4909 * we were already using and aren't changing at all here to suddenly
4910 * become invalid if its DDB needs exceeds its new allocation.
4912 * Note that if we wind up doing a full DDB recompute, we can't let
4913 * any other display updates race with this transaction, so we need
4914 * to grab the lock on *all* CRTC's.
4916 if (intel_state->active_pipe_changes) {
4917 realloc_pipes = ~0;
4918 intel_state->wm_results.dirty_pipes = ~0;
4919 }
4922 * We're not recomputing for the pipes not included in the commit, so
4923 * make sure we start with the current state.
4925 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
4927 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
4928 struct intel_crtc_state *cstate;
4930 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
4931 if (IS_ERR(cstate))
4932 return PTR_ERR(cstate);
4934 ret = skl_allocate_pipe_ddb(cstate, ddb);
4935 if (ret)
4936 return ret;
4938 ret = skl_ddb_add_affected_planes(cstate);
4939 if (ret)
4940 return ret;
4941 }
4943 return 0;
4944 }
4946 static void
4947 skl_copy_wm_for_pipe(struct skl_wm_values *dst,
4948 struct skl_wm_values *src,
4949 enum pipe pipe)
4951 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
4952 sizeof(dst->ddb.y_plane[pipe]));
4953 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
4954 sizeof(dst->ddb.plane[pipe]));
4957 static void
4958 skl_print_wm_changes(const struct drm_atomic_state *state)
4960 const struct drm_device *dev = state->dev;
4961 const struct drm_i915_private *dev_priv = to_i915(dev);
4962 const struct intel_atomic_state *intel_state =
4963 to_intel_atomic_state(state);
4964 const struct drm_crtc *crtc;
4965 const struct drm_crtc_state *cstate;
4966 const struct intel_plane *intel_plane;
4967 const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
4968 const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
4971 for_each_new_crtc_in_state(state, crtc, cstate, i) {
4972 const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4973 enum pipe pipe = intel_crtc->pipe;
4975 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
4976 enum plane_id plane_id = intel_plane->id;
4977 const struct skl_ddb_entry *old, *new;
4979 old = &old_ddb->plane[pipe][plane_id];
4980 new = &new_ddb->plane[pipe][plane_id];
4982 if (skl_ddb_entry_equal(old, new))
4983 continue;
4985 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
4986 intel_plane->base.base.id,
4987 intel_plane->base.name,
4988 old->start, old->end,
4989 new->start, new->end);
4994 static int
4995 skl_compute_wm(struct drm_atomic_state *state)
4997 struct drm_crtc *crtc;
4998 struct drm_crtc_state *cstate;
4999 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5000 struct skl_wm_values *results = &intel_state->wm_results;
5001 struct drm_device *dev = state->dev;
5002 struct skl_pipe_wm *pipe_wm;
5003 bool changed = false;
5004 int ret, i;
5007 * When we distrust bios wm we always need to recompute to set the
5008 * expected DDB allocations for each CRTC.
5010 if (to_i915(dev)->wm.distrust_bios_wm)
5011 changed = true;
5014 * If this transaction isn't actually touching any CRTC's, don't
5015 * bother with watermark calculation. Note that if we pass this
5016 * test, we're guaranteed to hold at least one CRTC state mutex,
5017 * which means we can safely use values like dev_priv->active_crtcs
5018 * since any racing commits that want to update them would need to
5019 * hold _all_ CRTC state mutexes.
5021 for_each_new_crtc_in_state(state, crtc, cstate, i)
5022 changed = true;
5024 if (!changed)
5025 return 0;
5027 /* Clear all dirty flags */
5028 results->dirty_pipes = 0;
5030 ret = skl_compute_ddb(state);
5031 if (ret)
5032 return ret;
5035 * Calculate WM's for all pipes that are part of this transaction.
5036 * Note that the DDB allocation above may have added more CRTC's that
5037 * weren't otherwise being modified (and set bits in dirty_pipes) if
5038 * pipe allocations had to change.
5040 * FIXME: Now that we're doing this in the atomic check phase, we
5041 * should allow skl_update_pipe_wm() to return failure in cases where
5042 * no suitable watermark values can be found.
5044 for_each_new_crtc_in_state(state, crtc, cstate, i) {
5045 struct intel_crtc_state *intel_cstate =
5046 to_intel_crtc_state(cstate);
5047 const struct skl_pipe_wm *old_pipe_wm =
5048 &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
5050 pipe_wm = &intel_cstate->wm.skl.optimal;
5051 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
5052 &results->ddb, &changed);
5053 if (ret)
5054 return ret;
5056 if (changed)
5057 results->dirty_pipes |= drm_crtc_mask(crtc);
5059 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
5060 /* This pipe's WM's did not change */
5061 continue;
5063 intel_cstate->update_wm_pre = true;
5064 }
5066 skl_print_wm_changes(state);
5068 return 0;
5069 }
5071 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
5072 struct intel_crtc_state *cstate)
5074 struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
5075 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5076 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
5077 const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
5078 enum pipe pipe = crtc->pipe;
5079 enum plane_id plane_id;
5081 if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
5082 return;
5084 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
5086 for_each_plane_id_on_crtc(crtc, plane_id) {
5087 if (plane_id != PLANE_CURSOR)
5088 skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
5089 ddb, plane_id);
5090 else
5091 skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
5092 ddb);
5093 }
5094 }
5096 static void skl_initial_wm(struct intel_atomic_state *state,
5097 struct intel_crtc_state *cstate)
5099 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5100 struct drm_device *dev = intel_crtc->base.dev;
5101 struct drm_i915_private *dev_priv = to_i915(dev);
5102 struct skl_wm_values *results = &state->wm_results;
5103 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
5104 enum pipe pipe = intel_crtc->pipe;
5106 if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
5107 return;
5109 mutex_lock(&dev_priv->wm.wm_mutex);
5111 if (cstate->base.active_changed)
5112 skl_atomic_update_crtc_wm(state, cstate);
5114 skl_copy_wm_for_pipe(hw_vals, results, pipe);
5116 mutex_unlock(&dev_priv->wm.wm_mutex);
5119 static void ilk_compute_wm_config(struct drm_device *dev,
5120 struct intel_wm_config *config)
5122 struct intel_crtc *crtc;
5124 /* Compute the currently _active_ config */
5125 for_each_intel_crtc(dev, crtc) {
5126 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
5128 if (!wm->pipe_enabled)
5129 continue;
5131 config->sprites_enabled |= wm->sprites_enabled;
5132 config->sprites_scaled |= wm->sprites_scaled;
5133 config->num_pipes_active++;
5137 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
5139 struct drm_device *dev = &dev_priv->drm;
5140 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
5141 struct ilk_wm_maximums max;
5142 struct intel_wm_config config = {};
5143 struct ilk_wm_values results = {};
5144 enum intel_ddb_partitioning partitioning;
5146 ilk_compute_wm_config(dev, &config);
5148 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
5149 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
5151 /* 5/6 split only in single pipe config on IVB+ */
5152 if (INTEL_GEN(dev_priv) >= 7 &&
5153 config.num_pipes_active == 1 && config.sprites_enabled) {
5154 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
5155 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
5157 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
5158 } else {
5159 best_lp_wm = &lp_wm_1_2;
5160 }
5162 partitioning = (best_lp_wm == &lp_wm_1_2) ?
5163 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
5165 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
5167 ilk_write_wm_values(dev_priv, &results);
5170 static void ilk_initial_watermarks(struct intel_atomic_state *state,
5171 struct intel_crtc_state *cstate)
5173 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5174 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5176 mutex_lock(&dev_priv->wm.wm_mutex);
5177 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
5178 ilk_program_watermarks(dev_priv);
5179 mutex_unlock(&dev_priv->wm.wm_mutex);
5182 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
5183 struct intel_crtc_state *cstate)
5185 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5186 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5188 mutex_lock(&dev_priv->wm.wm_mutex);
5189 if (cstate->wm.need_postvbl_update) {
5190 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
5191 ilk_program_watermarks(dev_priv);
5193 mutex_unlock(&dev_priv->wm.wm_mutex);
5196 static inline void skl_wm_level_from_reg_val(uint32_t val,
5197 struct skl_wm_level *level)
5199 level->plane_en = val & PLANE_WM_EN;
5200 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
5201 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
5202 PLANE_WM_LINES_MASK;
5203 }
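/*
 * Editor's illustrative sketch (not part of the driver): decoding a raw
 * PLANE_WM value with the helper above. For
 * val = PLANE_WM_EN | (2 << PLANE_WM_LINES_SHIFT) | 42 the level reads
 * back as enabled with plane_res_b = 42 and plane_res_l = 2.
 * example_wm_level_decode() is a hypothetical helper.
 */
static inline void example_wm_level_decode(struct skl_wm_level *level)
{
	skl_wm_level_from_reg_val(PLANE_WM_EN |
				  (2 << PLANE_WM_LINES_SHIFT) | 42, level);
}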
5205 void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
5206 struct skl_pipe_wm *out)
5208 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5209 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5210 enum pipe pipe = intel_crtc->pipe;
5211 int level, max_level;
5212 enum plane_id plane_id;
5213 uint32_t val;
5215 max_level = ilk_wm_max_level(dev_priv);
5217 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
5218 struct skl_plane_wm *wm = &out->planes[plane_id];
5220 for (level = 0; level <= max_level; level++) {
5221 if (plane_id != PLANE_CURSOR)
5222 val = I915_READ(PLANE_WM(pipe, plane_id, level));
5224 val = I915_READ(CUR_WM(pipe, level));
5226 skl_wm_level_from_reg_val(val, &wm->wm[level]);
5229 if (plane_id != PLANE_CURSOR)
5230 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
5232 val = I915_READ(CUR_WM_TRANS(pipe));
5234 skl_wm_level_from_reg_val(val, &wm->trans_wm);
5237 if (!intel_crtc->active)
5238 return;
5240 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
5243 void skl_wm_get_hw_state(struct drm_device *dev)
5245 struct drm_i915_private *dev_priv = to_i915(dev);
5246 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
5247 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
5248 struct drm_crtc *crtc;
5249 struct intel_crtc *intel_crtc;
5250 struct intel_crtc_state *cstate;
5252 skl_ddb_get_hw_state(dev_priv, ddb);
5253 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5254 intel_crtc = to_intel_crtc(crtc);
5255 cstate = to_intel_crtc_state(crtc->state);
5257 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
5259 if (intel_crtc->active)
5260 hw->dirty_pipes |= drm_crtc_mask(crtc);
5263 if (dev_priv->active_crtcs) {
5264 /* Fully recompute DDB on first atomic commit */
5265 dev_priv->wm.distrust_bios_wm = true;
5266 } else {
5267 /* Easy/common case; just sanitize DDB now if everything off */
5268 memset(ddb, 0, sizeof(*ddb));
5269 }
5270 }
5272 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
5274 struct drm_device *dev = crtc->dev;
5275 struct drm_i915_private *dev_priv = to_i915(dev);
5276 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5277 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5278 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
5279 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
5280 enum pipe pipe = intel_crtc->pipe;
5281 static const i915_reg_t wm0_pipe_reg[] = {
5282 [PIPE_A] = WM0_PIPEA_ILK,
5283 [PIPE_B] = WM0_PIPEB_ILK,
5284 [PIPE_C] = WM0_PIPEC_IVB,
5287 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
5288 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5289 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
5291 memset(active, 0, sizeof(*active));
5293 active->pipe_enabled = intel_crtc->active;
5295 if (active->pipe_enabled) {
5296 u32 tmp = hw->wm_pipe[pipe];
5299 * For active pipes LP0 watermark is marked as
5300 * enabled, and LP1+ watermarks as disabled since
5301 * we can't really reverse compute them in case
5302 * multiple pipes are active.
5304 active->wm[0].enable = true;
5305 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
5306 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
5307 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
5308 active->linetime = hw->wm_linetime[pipe];
5309 } else {
5310 int level, max_level = ilk_wm_max_level(dev_priv);
5313 * For inactive pipes, all watermark levels
5314 * should be marked as enabled but zeroed,
5315 * which is what we'd compute them to.
5317 for (level = 0; level <= max_level; level++)
5318 active->wm[level].enable = true;
5321 intel_crtc->wm.active.ilk = *active;
5324 #define _FW_WM(value, plane) \
5325 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
5326 #define _FW_WM_VLV(value, plane) \
5327 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
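/*
 * Editor's illustrative sketch (not part of the driver): _FW_WM() pulls a
 * single watermark field out of a packed DSPFW register value, e.g.
 * _FW_WM(tmp, SR) expands to (tmp & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT.
 * example_fw_wm_sr() is a hypothetical helper; the DSPFW_SR_* macros come
 * from i915_reg.h.
 */
static inline u32 example_fw_wm_sr(u32 dspfw1)
{
	return _FW_WM(dspfw1, SR);
}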
5329 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
5330 struct g4x_wm_values *wm)
5334 tmp = I915_READ(DSPFW1);
5335 wm->sr.plane = _FW_WM(tmp, SR);
5336 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5337 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
5338 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
5340 tmp = I915_READ(DSPFW2);
5341 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
5342 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
5343 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
5344 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
5345 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5346 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
5348 tmp = I915_READ(DSPFW3);
5349 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
5350 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5351 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
5352 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
5355 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
5356 struct vlv_wm_values *wm)
5361 for_each_pipe(dev_priv, pipe) {
5362 tmp = I915_READ(VLV_DDL(pipe));
5364 wm->ddl[pipe].plane[PLANE_PRIMARY] =
5365 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5366 wm->ddl[pipe].plane[PLANE_CURSOR] =
5367 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5368 wm->ddl[pipe].plane[PLANE_SPRITE0] =
5369 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5370 wm->ddl[pipe].plane[PLANE_SPRITE1] =
5371 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5374 tmp = I915_READ(DSPFW1);
5375 wm->sr.plane = _FW_WM(tmp, SR);
5376 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5377 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
5378 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
5380 tmp = I915_READ(DSPFW2);
5381 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
5382 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5383 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
5385 tmp = I915_READ(DSPFW3);
5386 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5388 if (IS_CHERRYVIEW(dev_priv)) {
5389 tmp = I915_READ(DSPFW7_CHV);
5390 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5391 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5393 tmp = I915_READ(DSPFW8_CHV);
5394 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
5395 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
5397 tmp = I915_READ(DSPFW9_CHV);
5398 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
5399 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
5401 tmp = I915_READ(DSPHOWM);
5402 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5403 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
5404 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
5405 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
5406 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5407 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5408 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5409 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5410 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5411 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
5413 tmp = I915_READ(DSPFW7);
5414 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5415 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5417 tmp = I915_READ(DSPHOWM);
5418 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5419 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5420 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5421 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5422 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5423 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5424 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
5431 void g4x_wm_get_hw_state(struct drm_device *dev)
5433 struct drm_i915_private *dev_priv = to_i915(dev);
5434 struct g4x_wm_values *wm = &dev_priv->wm.g4x;
5435 struct intel_crtc *crtc;
5437 g4x_read_wm_values(dev_priv, wm);
5439 wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
5441 for_each_intel_crtc(dev, crtc) {
5442 struct intel_crtc_state *crtc_state =
5443 to_intel_crtc_state(crtc->base.state);
5444 struct g4x_wm_state *active = &crtc->wm.active.g4x;
5445 struct g4x_pipe_wm *raw;
5446 enum pipe pipe = crtc->pipe;
5447 enum plane_id plane_id;
5448 int level, max_level;
5450 active->cxsr = wm->cxsr;
5451 active->hpll_en = wm->hpll_en;
5452 active->fbc_en = wm->fbc_en;
5454 active->sr = wm->sr;
5455 active->hpll = wm->hpll;
5457 for_each_plane_id_on_crtc(crtc, plane_id) {
5458 active->wm.plane[plane_id] =
5459 wm->pipe[pipe].plane[plane_id];
5462 if (wm->cxsr && wm->hpll_en)
5463 max_level = G4X_WM_LEVEL_HPLL;
5464 else if (wm->cxsr)
5465 max_level = G4X_WM_LEVEL_SR;
5466 else
5467 max_level = G4X_WM_LEVEL_NORMAL;
5469 level = G4X_WM_LEVEL_NORMAL;
5470 raw = &crtc_state->wm.g4x.raw[level];
5471 for_each_plane_id_on_crtc(crtc, plane_id)
5472 raw->plane[plane_id] = active->wm.plane[plane_id];
5474 if (++level > max_level)
5475 goto out;
5477 raw = &crtc_state->wm.g4x.raw[level];
5478 raw->plane[PLANE_PRIMARY] = active->sr.plane;
5479 raw->plane[PLANE_CURSOR] = active->sr.cursor;
5480 raw->plane[PLANE_SPRITE0] = 0;
5481 raw->fbc = active->sr.fbc;
5483 if (++level > max_level)
5484 goto out;
5486 raw = &crtc_state->wm.g4x.raw[level];
5487 raw->plane[PLANE_PRIMARY] = active->hpll.plane;
5488 raw->plane[PLANE_CURSOR] = active->hpll.cursor;
5489 raw->plane[PLANE_SPRITE0] = 0;
5490 raw->fbc = active->hpll.fbc;
5492 out:
5493 for_each_plane_id_on_crtc(crtc, plane_id)
5494 g4x_raw_plane_wm_set(crtc_state, level,
5495 plane_id, USHRT_MAX);
5496 g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
5498 crtc_state->wm.g4x.optimal = *active;
5499 crtc_state->wm.g4x.intermediate = *active;
5501 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
5502 pipe_name(pipe),
5503 wm->pipe[pipe].plane[PLANE_PRIMARY],
5504 wm->pipe[pipe].plane[PLANE_CURSOR],
5505 wm->pipe[pipe].plane[PLANE_SPRITE0]);
5508 DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
5509 wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
5510 DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
5511 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
5512 DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
5513 yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
5516 void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
5518 struct intel_plane *plane;
5519 struct intel_crtc *crtc;
5521 mutex_lock(&dev_priv->wm.wm_mutex);
5523 for_each_intel_plane(&dev_priv->drm, plane) {
5524 struct intel_crtc *crtc =
5525 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
5526 struct intel_crtc_state *crtc_state =
5527 to_intel_crtc_state(crtc->base.state);
5528 struct intel_plane_state *plane_state =
5529 to_intel_plane_state(plane->base.state);
5530 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
5531 enum plane_id plane_id = plane->id;
5534 if (plane_state->base.visible)
5535 continue;
5537 for (level = 0; level < 3; level++) {
5538 struct g4x_pipe_wm *raw =
5539 &crtc_state->wm.g4x.raw[level];
5541 raw->plane[plane_id] = 0;
5542 wm_state->wm.plane[plane_id] = 0;
5545 if (plane_id == PLANE_PRIMARY) {
5546 for (level = 0; level < 3; level++) {
5547 struct g4x_pipe_wm *raw =
5548 &crtc_state->wm.g4x.raw[level];
5550 raw->fbc = 0;
5551 }
5552 wm_state->sr.fbc = 0;
5553 wm_state->hpll.fbc = 0;
5554 wm_state->fbc_en = false;
5558 for_each_intel_crtc(&dev_priv->drm, crtc) {
5559 struct intel_crtc_state *crtc_state =
5560 to_intel_crtc_state(crtc->base.state);
5562 crtc_state->wm.g4x.intermediate =
5563 crtc_state->wm.g4x.optimal;
5564 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
5567 g4x_program_watermarks(dev_priv);
5569 mutex_unlock(&dev_priv->wm.wm_mutex);
5572 void vlv_wm_get_hw_state(struct drm_device *dev)
5574 struct drm_i915_private *dev_priv = to_i915(dev);
5575 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
5576 struct intel_crtc *crtc;
5579 vlv_read_wm_values(dev_priv, wm);
5581 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
5582 wm->level = VLV_WM_LEVEL_PM2;
5584 if (IS_CHERRYVIEW(dev_priv)) {
5585 mutex_lock(&dev_priv->rps.hw_lock);
5587 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5588 if (val & DSP_MAXFIFO_PM5_ENABLE)
5589 wm->level = VLV_WM_LEVEL_PM5;
5592 * If DDR DVFS is disabled in the BIOS, Punit
5593 * will never ack the request. So if that happens
5594 * assume we don't have to enable/disable DDR DVFS
5595 * dynamically. To test that just set the REQ_ACK
5596 * bit to poke the Punit, but don't change the
5597 * HIGH/LOW bits so that we don't actually change
5598 * the current state.
5600 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
5601 val |= FORCE_DDR_FREQ_REQ_ACK;
5602 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
5604 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
5605 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
5606 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
5607 "assuming DDR DVFS is disabled\n");
5608 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
5609 } else {
5610 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
5611 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
5612 wm->level = VLV_WM_LEVEL_DDR_DVFS;
5613 }
5615 mutex_unlock(&dev_priv->rps.hw_lock);
5618 for_each_intel_crtc(dev, crtc) {
5619 struct intel_crtc_state *crtc_state =
5620 to_intel_crtc_state(crtc->base.state);
5621 struct vlv_wm_state *active = &crtc->wm.active.vlv;
5622 const struct vlv_fifo_state *fifo_state =
5623 &crtc_state->wm.vlv.fifo_state;
5624 enum pipe pipe = crtc->pipe;
5625 enum plane_id plane_id;
5626 int level;
5628 vlv_get_fifo_size(crtc_state);
5630 active->num_levels = wm->level + 1;
5631 active->cxsr = wm->cxsr;
5633 for (level = 0; level < active->num_levels; level++) {
5634 struct g4x_pipe_wm *raw =
5635 &crtc_state->wm.vlv.raw[level];
5637 active->sr[level].plane = wm->sr.plane;
5638 active->sr[level].cursor = wm->sr.cursor;
5640 for_each_plane_id_on_crtc(crtc, plane_id) {
5641 active->wm[level].plane[plane_id] =
5642 wm->pipe[pipe].plane[plane_id];
5644 raw->plane[plane_id] =
5645 vlv_invert_wm_value(active->wm[level].plane[plane_id],
5646 fifo_state->plane[plane_id]);
5650 for_each_plane_id_on_crtc(crtc, plane_id)
5651 vlv_raw_plane_wm_set(crtc_state, level,
5652 plane_id, USHRT_MAX);
5653 vlv_invalidate_wms(crtc, active, level);
5655 crtc_state->wm.vlv.optimal = *active;
5656 crtc_state->wm.vlv.intermediate = *active;
5658 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
5659 pipe_name(pipe),
5660 wm->pipe[pipe].plane[PLANE_PRIMARY],
5661 wm->pipe[pipe].plane[PLANE_CURSOR],
5662 wm->pipe[pipe].plane[PLANE_SPRITE0],
5663 wm->pipe[pipe].plane[PLANE_SPRITE1]);
5666 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
5667 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
5670 void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
5672 struct intel_plane *plane;
5673 struct intel_crtc *crtc;
5675 mutex_lock(&dev_priv->wm.wm_mutex);
5677 for_each_intel_plane(&dev_priv->drm, plane) {
5678 struct intel_crtc *crtc =
5679 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
5680 struct intel_crtc_state *crtc_state =
5681 to_intel_crtc_state(crtc->base.state);
5682 struct intel_plane_state *plane_state =
5683 to_intel_plane_state(plane->base.state);
5684 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
5685 const struct vlv_fifo_state *fifo_state =
5686 &crtc_state->wm.vlv.fifo_state;
5687 enum plane_id plane_id = plane->id;
5690 if (plane_state->base.visible)
5691 continue;
5693 for (level = 0; level < wm_state->num_levels; level++) {
5694 struct g4x_pipe_wm *raw =
5695 &crtc_state->wm.vlv.raw[level];
5697 raw->plane[plane_id] = 0;
5699 wm_state->wm[level].plane[plane_id] =
5700 vlv_invert_wm_value(raw->plane[plane_id],
5701 fifo_state->plane[plane_id]);
5705 for_each_intel_crtc(&dev_priv->drm, crtc) {
5706 struct intel_crtc_state *crtc_state =
5707 to_intel_crtc_state(crtc->base.state);
5709 crtc_state->wm.vlv.intermediate =
5710 crtc_state->wm.vlv.optimal;
5711 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
5714 vlv_program_watermarks(dev_priv);
5716 mutex_unlock(&dev_priv->wm.wm_mutex);
5720 * FIXME should probably kill this and improve
5721 * the real watermark readout/sanitation instead
5723 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
5725 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
5726 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
5727 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
5730 * Don't touch WM1S_LP_EN here.
5731 * Doing so could cause underruns.
5735 void ilk_wm_get_hw_state(struct drm_device *dev)
5737 struct drm_i915_private *dev_priv = to_i915(dev);
5738 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5739 struct drm_crtc *crtc;
5741 ilk_init_lp_watermarks(dev_priv);
5743 for_each_crtc(dev, crtc)
5744 ilk_pipe_wm_get_hw_state(crtc);
5746 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
5747 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
5748 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
5750 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
5751 if (INTEL_GEN(dev_priv) >= 7) {
5752 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
5753 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
5756 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5757 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
5758 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
5759 else if (IS_IVYBRIDGE(dev_priv))
5760 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
5761 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
5763 hw->enable_fbc_wm =
5764 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
5765 }
5768 * intel_update_watermarks - update FIFO watermark values based on current modes
5770 * Calculate watermark values for the various WM regs based on current mode
5771 * and plane configuration.
5773 * There are several cases to deal with here:
5774 * - normal (i.e. non-self-refresh)
5775 * - self-refresh (SR) mode
5776 * - lines are large relative to FIFO size (buffer can hold up to 2)
5777 * - lines are small relative to FIFO size (buffer can hold more than 2
5778 * lines), so need to account for TLB latency
5780 * The normal calculation is:
5781 * watermark = dotclock * bytes per pixel * latency
5782 * where latency is platform & configuration dependent (we assume pessimal
5785 * The SR calculation is:
5786 * watermark = (trunc(latency/line time)+1) * surface width *
5787 * bytes per pixel
5788 * where
5789 * line time = htotal / dotclock
5790 * surface width = hdisplay for normal plane and 64 for cursor
5791 * and latency is assumed to be high, as above.
5793 * The final value programmed to the register should always be rounded up,
5794 * and include an extra 2 entries to account for clock crossings.
5796 * We don't use the sprite, so we can ignore that. And on Crestline we have
5797 * to set the non-SR watermarks to 8.
5799 void intel_update_watermarks(struct intel_crtc *crtc)
5801 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5803 if (dev_priv->display.update_wm)
5804 dev_priv->display.update_wm(crtc);
5805 }
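/*
 * Editor's illustrative sketch (not part of the driver): the SR formula
 * from the comment above with assumed numbers. For a 30 us latency, a
 * 15 us line time, hdisplay = 1920 and 4 bytes per pixel:
 *   (trunc(30 / 15) + 1) * 1920 * 4 = 3 * 7680 = 23040 bytes.
 * example_sr_watermark() is a hypothetical helper.
 */
static inline uint32_t example_sr_watermark(void)
{
	uint32_t latency_us = 30, line_time_us = 15;
	uint32_t hdisplay = 1920, cpp = 4;

	return (latency_us / line_time_us + 1) * hdisplay * cpp;
}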
5808 * Lock protecting IPS related data structures
5810 DEFINE_SPINLOCK(mchdev_lock);
5812 /* Global for IPS driver to get at the current i915 device. Protected by
5813 * mchdev_lock. */
5814 static struct drm_i915_private *i915_mch_dev;
5816 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
5818 u16 rgvswctl;
5820 lockdep_assert_held(&mchdev_lock);
5822 rgvswctl = I915_READ16(MEMSWCTL);
5823 if (rgvswctl & MEMCTL_CMD_STS) {
5824 DRM_DEBUG("gpu busy, RCS change rejected\n");
5825 return false; /* still busy with another command */
5828 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
5829 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5830 I915_WRITE16(MEMSWCTL, rgvswctl);
5831 POSTING_READ16(MEMSWCTL);
5833 rgvswctl |= MEMCTL_CMD_STS;
5834 I915_WRITE16(MEMSWCTL, rgvswctl);
5836 return true;
5837 }
5839 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
5841 u32 rgvmodectl;
5842 u8 fmax, fmin, fstart, vstart;
5844 spin_lock_irq(&mchdev_lock);
5846 rgvmodectl = I915_READ(MEMMODECTL);
5848 /* Enable temp reporting */
5849 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
5850 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
5852 /* 100ms RC evaluation intervals */
5853 I915_WRITE(RCUPEI, 100000);
5854 I915_WRITE(RCDNEI, 100000);
5856 /* Set max/min thresholds to 90ms and 80ms respectively */
5857 I915_WRITE(RCBMAXAVG, 90000);
5858 I915_WRITE(RCBMINAVG, 80000);
5860 I915_WRITE(MEMIHYST, 1);
5862 /* Set up min, max, and cur for interrupt handling */
5863 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
5864 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
5865 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
5866 MEMMODE_FSTART_SHIFT;
5868 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
5869 PXVFREQ_PX_SHIFT;
5871 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
5872 dev_priv->ips.fstart = fstart;
5874 dev_priv->ips.max_delay = fstart;
5875 dev_priv->ips.min_delay = fmin;
5876 dev_priv->ips.cur_delay = fstart;
5878 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
5879 fmax, fmin, fstart);
5881 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
5884 * Interrupts will be enabled in ironlake_irq_postinstall
5887 I915_WRITE(VIDSTART, vstart);
5888 POSTING_READ(VIDSTART);
5890 rgvmodectl |= MEMMODE_SWMODE_EN;
5891 I915_WRITE(MEMMODECTL, rgvmodectl);
5893 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
5894 DRM_ERROR("stuck trying to change perf mode\n");
5895 mdelay(1);
5897 ironlake_set_drps(dev_priv, fstart);
5899 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
5900 I915_READ(DDREC) + I915_READ(CSIEC);
5901 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
5902 dev_priv->ips.last_count2 = I915_READ(GFXEC);
5903 dev_priv->ips.last_time2 = ktime_get_raw_ns();
5905 spin_unlock_irq(&mchdev_lock);
5908 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
5910 u16 rgvswctl;
5912 spin_lock_irq(&mchdev_lock);
5914 rgvswctl = I915_READ16(MEMSWCTL);
5916 /* Ack interrupts, disable EFC interrupt */
5917 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
5918 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
5919 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
5920 I915_WRITE(DEIIR, DE_PCU_EVENT);
5921 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
5923 /* Go back to the starting frequency */
5924 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
5926 rgvswctl |= MEMCTL_CMD_STS;
5927 I915_WRITE(MEMSWCTL, rgvswctl);
5928 mdelay(1);
5930 spin_unlock_irq(&mchdev_lock);
5933 /* There's a funny hw issue where the hw returns all 0 when reading from
5934 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
5935 * ourselves, instead of doing an RMW cycle (which might result in us clearing
5936 * all limits and the GPU getting stuck at whatever frequency it is at atm).
5937 */
5938 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
5940 u32 limits;
5942 /* Only set the down limit when we've reached the lowest level to avoid
5943 * getting more interrupts, otherwise leave this clear. This prevents a
5944 * race in the hw when coming out of rc6: There's a tiny window where
5945 * the hw runs at the minimal clock before selecting the desired
5946 * frequency; if the down threshold expires in that window we will not
5947 * receive a down interrupt. */
5948 if (INTEL_GEN(dev_priv) >= 9) {
5949 limits = (dev_priv->rps.max_freq_softlimit) << 23;
5950 if (val <= dev_priv->rps.min_freq_softlimit)
5951 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
5953 limits = dev_priv->rps.max_freq_softlimit << 24;
5954 if (val <= dev_priv->rps.min_freq_softlimit)
5955 limits |= dev_priv->rps.min_freq_softlimit << 16;
5957 return limits;
5958 }
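/*
 * Editor's illustrative sketch (not part of the driver): on gen9 the
 * limits word computed above packs the max softlimit at bit 23 and, only
 * when already at the floor, the min softlimit at bit 14. For example
 * max = 0x16, min = 0x7 and val == min gives
 * (0x16 << 23) | (0x7 << 14) = 0x0b01c000. example_gen9_rps_limits() is a
 * hypothetical helper.
 */
static inline u32 example_gen9_rps_limits(u8 max_soft, u8 min_soft, u8 val)
{
	u32 limits = max_soft << 23;

	if (val <= min_soft)
		limits |= min_soft << 14;

	return limits;
}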
5961 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
5963 int new_power;
5964 u32 threshold_up = 0, threshold_down = 0; /* in % */
5965 u32 ei_up = 0, ei_down = 0;
5967 new_power = dev_priv->rps.power;
5968 switch (dev_priv->rps.power) {
5969 case LOW_POWER:
5970 if (val > dev_priv->rps.efficient_freq + 1 &&
5971 val > dev_priv->rps.cur_freq)
5972 new_power = BETWEEN;
5973 break;
5975 case BETWEEN:
5976 if (val <= dev_priv->rps.efficient_freq &&
5977 val < dev_priv->rps.cur_freq)
5978 new_power = LOW_POWER;
5979 else if (val >= dev_priv->rps.rp0_freq &&
5980 val > dev_priv->rps.cur_freq)
5981 new_power = HIGH_POWER;
5982 break;
5984 case HIGH_POWER:
5985 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
5986 val < dev_priv->rps.cur_freq)
5987 new_power = BETWEEN;
5988 break;
5989 }
5990 /* Max/min bins are special */
5991 if (val <= dev_priv->rps.min_freq_softlimit)
5992 new_power = LOW_POWER;
5993 if (val >= dev_priv->rps.max_freq_softlimit)
5994 new_power = HIGH_POWER;
5995 if (new_power == dev_priv->rps.power)
5996 return;
5998 /* Note the units here are not exactly 1us, but 1280ns. */
5999 switch (new_power) {
6000 case LOW_POWER:
6001 /* Upclock if more than 95% busy over 16ms */
6002 ei_up = 16000;
6003 threshold_up = 95;
6005 /* Downclock if less than 85% busy over 32ms */
6006 ei_down = 32000;
6007 threshold_down = 85;
6008 break;
6010 case BETWEEN:
6011 /* Upclock if more than 90% busy over 13ms */
6012 ei_up = 13000;
6013 threshold_up = 90;
6015 /* Downclock if less than 75% busy over 32ms */
6016 ei_down = 32000;
6017 threshold_down = 75;
6018 break;
6020 case HIGH_POWER:
6021 /* Upclock if more than 85% busy over 10ms */
6022 ei_up = 10000;
6023 threshold_up = 85;
6025 /* Downclock if less than 60% busy over 32ms */
6026 ei_down = 32000;
6027 threshold_down = 60;
6028 break;
6029 }
6031 /* When byt can survive without system hang with dynamic
6032 * sw freq adjustments, this restriction can be lifted.
6033 */
6034 if (IS_VALLEYVIEW(dev_priv))
6035 goto skip_hw_write;
6037 I915_WRITE(GEN6_RP_UP_EI,
6038 GT_INTERVAL_FROM_US(dev_priv, ei_up));
6039 I915_WRITE(GEN6_RP_UP_THRESHOLD,
6040 GT_INTERVAL_FROM_US(dev_priv,
6041 ei_up * threshold_up / 100));
6043 I915_WRITE(GEN6_RP_DOWN_EI,
6044 GT_INTERVAL_FROM_US(dev_priv, ei_down));
6045 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
6046 GT_INTERVAL_FROM_US(dev_priv,
6047 ei_down * threshold_down / 100));
6049 I915_WRITE(GEN6_RP_CONTROL,
6050 GEN6_RP_MEDIA_TURBO |
6051 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6052 GEN6_RP_MEDIA_IS_GFX |
6053 GEN6_RP_ENABLE |
6054 GEN6_RP_UP_BUSY_AVG |
6055 GEN6_RP_DOWN_IDLE_AVG);
6057 skip_hw_write:
6058 dev_priv->rps.power = new_power;
6059 dev_priv->rps.up_threshold = threshold_up;
6060 dev_priv->rps.down_threshold = threshold_down;
6061 dev_priv->rps.last_adj = 0;
6064 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
6066 u32 mask = 0;
6068 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
6069 if (val > dev_priv->rps.min_freq_softlimit)
6070 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
6071 if (val < dev_priv->rps.max_freq_softlimit)
6072 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
6074 mask &= dev_priv->pm_rps_events;
6076 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
6079 /* gen6_set_rps is called to update the frequency request, but should also be
6080 * called when the range (min_delay and max_delay) is modified so that we can
6081 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
6082 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
6084 /* The min/max delay may still have been modified, so be sure to
6085 * write the limits value.
6087 if (val != dev_priv->rps.cur_freq) {
6088 gen6_set_rps_thresholds(dev_priv, val);
6090 if (INTEL_GEN(dev_priv) >= 9)
6091 I915_WRITE(GEN6_RPNSWREQ,
6092 GEN9_FREQUENCY(val));
6093 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6094 I915_WRITE(GEN6_RPNSWREQ,
6095 HSW_FREQUENCY(val));
6096 else
6097 I915_WRITE(GEN6_RPNSWREQ,
6098 GEN6_FREQUENCY(val) |
6100 GEN6_AGGRESSIVE_TURBO);
6103 /* Make sure we continue to get interrupts
6104 * until we hit the minimum or maximum frequencies.
6106 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
6107 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
6109 dev_priv->rps.cur_freq = val;
6110 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
6115 static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
6119 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
6120 "Odd GPU freq value\n"))
6123 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
6125 if (val != dev_priv->rps.cur_freq) {
6126 err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
6130 gen6_set_rps_thresholds(dev_priv, val);
6133 dev_priv->rps.cur_freq = val;
6134 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
6139 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
6141 * If Gfx is idle, then
6142 * 1. Forcewake Media well.
6143 * 2. Request idle freq.
6144 * 3. Release Forcewake of Media well.
6146 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
6148 u32 val = dev_priv->rps.idle_freq;
6151 if (dev_priv->rps.cur_freq <= val)
6152 return;
6154 /* The punit delays the write of the frequency and voltage until it
6155 * determines the GPU is awake. During normal usage we don't want to
6156 * waste power changing the frequency if the GPU is sleeping (rc6).
6157 * However, the GPU and driver is now idle and we do not want to delay
6158 * switching to minimum voltage (reducing power whilst idle) as we do
6159 * not expect to be woken in the near future and so must flush the
6160 * change by waking the device.
6162 * We choose to take the media powerwell (either would do to trick the
6163 * punit into committing the voltage change) as that takes a lot less
6164 * power than the render powerwell.
6166 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
6167 err = valleyview_set_rps(dev_priv, val);
6168 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
6171 DRM_ERROR("Failed to set RPS for idle\n");
6174 void gen6_rps_busy(struct drm_i915_private *dev_priv)
6176 mutex_lock(&dev_priv->rps.hw_lock);
6177 if (dev_priv->rps.enabled) {
6180 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
6181 gen6_rps_reset_ei(dev_priv);
6182 I915_WRITE(GEN6_PMINTRMSK,
6183 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
6185 gen6_enable_rps_interrupts(dev_priv);
6187 /* Use the user's desired frequency as a guide, but for better
6188 * performance, jump directly to RPe as our starting frequency.
6190 freq = max(dev_priv->rps.cur_freq,
6191 dev_priv->rps.efficient_freq);
6193 if (intel_set_rps(dev_priv,
6194 clamp(freq,
6195 dev_priv->rps.min_freq_softlimit,
6196 dev_priv->rps.max_freq_softlimit)))
6197 DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
6199 mutex_unlock(&dev_priv->rps.hw_lock);
6202 void gen6_rps_idle(struct drm_i915_private *dev_priv)
6204 /* Flush our bottom-half so that it does not race with us
6205 * setting the idle frequency and so that it is bounded by
6206 * our rpm wakeref. And then disable the interrupts to stop any
6207 * further RPS reclocking whilst we are asleep.
6209 gen6_disable_rps_interrupts(dev_priv);
6211 mutex_lock(&dev_priv->rps.hw_lock);
6212 if (dev_priv->rps.enabled) {
6213 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6214 vlv_set_rps_idle(dev_priv);
6215 else
6216 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
6217 dev_priv->rps.last_adj = 0;
6218 I915_WRITE(GEN6_PMINTRMSK,
6219 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
6221 mutex_unlock(&dev_priv->rps.hw_lock);
6224 void gen6_rps_boost(struct drm_i915_gem_request *rq,
6225 struct intel_rps_client *rps)
6227 struct drm_i915_private *i915 = rq->i915;
6230 /* This is intentionally racy! We peek at the state here, then
6231 * validate inside the RPS worker.
6233 if (!i915->rps.enabled)
6234 return;
6237 spin_lock_irq(&rq->lock);
6238 if (!rq->waitboost && !i915_gem_request_completed(rq)) {
6239 atomic_inc(&i915->rps.num_waiters);
6240 rq->waitboost = true;
6243 spin_unlock_irq(&rq->lock);
6247 if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
6248 schedule_work(&i915->rps.work);
6250 atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
6253 int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
6257 lockdep_assert_held(&dev_priv->rps.hw_lock);
6258 GEM_BUG_ON(val > dev_priv->rps.max_freq);
6259 GEM_BUG_ON(val < dev_priv->rps.min_freq);
6261 if (!dev_priv->rps.enabled) {
6262 dev_priv->rps.cur_freq = val;
6263 return 0;
6266 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6267 err = valleyview_set_rps(dev_priv, val);
6268 else
6269 err = gen6_set_rps(dev_priv, val);
6274 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
6276 I915_WRITE(GEN6_RC_CONTROL, 0);
6277 I915_WRITE(GEN9_PG_ENABLE, 0);
6280 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
6282 I915_WRITE(GEN6_RP_CONTROL, 0);
6285 static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
6287 I915_WRITE(GEN6_RC_CONTROL, 0);
6290 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
6292 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
6293 I915_WRITE(GEN6_RP_CONTROL, 0);
6296 static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
6298 I915_WRITE(GEN6_RC_CONTROL, 0);
6301 static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
6303 /* We do forcewake before disabling RC6;
6304 * this is what the BIOS expects when going into suspend */
6305 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6307 I915_WRITE(GEN6_RC_CONTROL, 0);
6309 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6312 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
6314 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6315 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
6316 mode = GEN6_RC_CTL_RC6_ENABLE;
6320 if (HAS_RC6p(dev_priv))
6321 DRM_DEBUG_DRIVER("Enabling RC6 states: "
6322 "RC6 %s RC6p %s RC6pp %s\n",
6323 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
6324 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
6325 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
6328 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
6329 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
6332 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
6334 struct i915_ggtt *ggtt = &dev_priv->ggtt;
6335 bool enable_rc6 = true;
6336 unsigned long rc6_ctx_base;
6340 rc_ctl = I915_READ(GEN6_RC_CONTROL);
6341 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
6342 RC_SW_TARGET_STATE_SHIFT;
6343 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
6344 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
6345 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
6346 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
6349 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
6350 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
6355 * The exact context size is not known for BXT, so assume a page size for this check.
6358 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
6359 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
6360 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
6361 ggtt->stolen_reserved_size))) {
6362 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
6366 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
6367 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
6368 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
6369 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
6370 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
6374 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
6375 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
6376 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
6377 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
6381 if (!I915_READ(GEN6_GFXPAUSE)) {
6382 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
6386 if (!I915_READ(GEN8_MISC_CTRL0)) {
6387 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
6394 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
6396 /* No RC6 before Ironlake and code is gone for ilk. */
6397 if (INTEL_INFO(dev_priv)->gen < 6)
6398 return 0;
6403 if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
6404 DRM_INFO("RC6 disabled by BIOS\n");
6405 return 0;
6408 /* Respect the kernel parameter if it is set */
6409 if (enable_rc6 >= 0) {
6412 if (HAS_RC6p(dev_priv))
6413 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
6416 mask = INTEL_RC6_ENABLE;
6418 if ((enable_rc6 & mask) != enable_rc6)
6419 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
6420 "(requested %d, valid %d)\n",
6421 enable_rc6 & mask, enable_rc6, mask);
6423 return enable_rc6 & mask;
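/*
 * Example of the trimming above: a request of
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE) on
 * hardware without RC6p support is reduced to INTEL_RC6_ENABLE, and
 * the adjustment is reported by the debug message.
 */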
6426 if (IS_IVYBRIDGE(dev_priv))
6427 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
6429 return INTEL_RC6_ENABLE;
6432 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
6434 /* All of these values are in units of 50MHz */
6436 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
6437 if (IS_GEN9_LP(dev_priv)) {
6438 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
6439 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
6440 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
6441 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
6443 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6444 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
6445 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
6446 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
6448 /* hw_max = RP0 until we check for overclocking */
6449 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
6451 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
6452 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
6453 IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6454 u32 ddcc_status = 0;
6456 if (sandybridge_pcode_read(dev_priv,
6457 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
6459 dev_priv->rps.efficient_freq =
6461 ((ddcc_status >> 8) & 0xff),
6462 dev_priv->rps.min_freq,
6463 dev_priv->rps.max_freq);
6466 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6467 /* Store the frequency values in 16.66 MHz units, which is
6468 * the natural hardware unit for SKL
6470 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
6471 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
6472 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
6473 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
6474 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
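/*
 * Worked example (illustrative fuse value; assumes GEN9_FREQ_SCALER
 * is 3): an RP0 of 22 read in 50 MHz units (1100 MHz) is rescaled to
 * 66 in 16.66 MHz units, i.e. still 1100 MHz -- only the unit
 * changes, not the frequency.
 */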
6478 static void reset_rps(struct drm_i915_private *dev_priv,
6479 int (*set)(struct drm_i915_private *, u8))
6481 u8 freq = dev_priv->rps.cur_freq;
6484 dev_priv->rps.power = -1;
6485 dev_priv->rps.cur_freq = -1;
6487 if (set(dev_priv, freq))
6488 DRM_ERROR("Failed to reset RPS to initial values\n");
6491 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
6492 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
6494 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6496 /* Program defaults and thresholds for RPS*/
6497 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6498 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
6500 /* 1 second timeout*/
6501 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
6502 GT_INTERVAL_FROM_US(dev_priv, 1000000));
6504 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
6506 /* Lean on the call to gen6_set_rps below to program/set up the
6507 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
6508 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
6509 reset_rps(dev_priv, gen6_set_rps);
6511 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6514 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
6516 struct intel_engine_cs *engine;
6517 enum intel_engine_id id;
6518 uint32_t rc6_mask = 0;
6520 /* 1a: Software RC state - RC0 */
6521 I915_WRITE(GEN6_RC_STATE, 0);
6523 /* 1b: Get forcewake during program sequence. Although the driver
6524 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6525 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6527 /* 2a: Disable RC states. */
6528 I915_WRITE(GEN6_RC_CONTROL, 0);
6530 /* 2b: Program RC6 thresholds.*/
6532 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
6533 if (IS_SKYLAKE(dev_priv))
6534 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
6535 else
6536 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
6537 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6538 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6539 for_each_engine(engine, dev_priv, id)
6540 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6542 if (HAS_GUC(dev_priv))
6543 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
6545 I915_WRITE(GEN6_RC_SLEEP, 0);
6547 /* 2c: Program Coarse Power Gating Policies. */
6548 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
6549 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
6551 /* 3a: Enable RC6 */
6552 if (!dev_priv->rps.ctx_corrupted &&
6553 intel_enable_rc6() & INTEL_RC6_ENABLE)
6554 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
6555 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
6556 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
6557 I915_WRITE(GEN6_RC_CONTROL,
6558 GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
6561 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
6562 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
6564 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
6565 I915_WRITE(GEN9_PG_ENABLE, 0);
6566 else
6567 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
6568 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
6570 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6573 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
6575 struct intel_engine_cs *engine;
6576 enum intel_engine_id id;
6577 uint32_t rc6_mask = 0;
6579 /* 1a: Software RC state - RC0 */
6580 I915_WRITE(GEN6_RC_STATE, 0);
6582 /* 1c & 1d: Get forcewake during program sequence. Although the driver
6583 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6584 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6586 /* 2a: Disable RC states. */
6587 I915_WRITE(GEN6_RC_CONTROL, 0);
6589 /* 2b: Program RC6 thresholds.*/
6590 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
6591 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6592 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6593 for_each_engine(engine, dev_priv, id)
6594 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6595 I915_WRITE(GEN6_RC_SLEEP, 0);
6596 if (IS_BROADWELL(dev_priv))
6597 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
6598 else
6599 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
6602 if (!dev_priv->rps.ctx_corrupted &&
6603 intel_enable_rc6() & INTEL_RC6_ENABLE)
6604 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
6605 intel_print_rc6_info(dev_priv, rc6_mask);
6606 if (IS_BROADWELL(dev_priv))
6607 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
6608 GEN7_RC_CTL_TO_MODE |
6609 rc6_mask);
6610 else
6611 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
6612 GEN6_RC_CTL_EI_MODE(1) |
6613 rc6_mask);
6615 /* 4 Program defaults and thresholds for RPS*/
6616 I915_WRITE(GEN6_RPNSWREQ,
6617 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
6618 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6619 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
6620 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
6621 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
6623 /* Docs recommend 900MHz, and 300 MHz respectively */
6624 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
6625 dev_priv->rps.max_freq_softlimit << 24 |
6626 dev_priv->rps.min_freq_softlimit << 16);
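/*
 * Worked encoding of the packing above: the documented 900 MHz /
 * 300 MHz pair is 18 (0x12) and 6 in 50 MHz units, which would encode
 * as 0x12 << 24 | 0x6 << 16 = 0x12060000 (illustrative; the actual
 * write uses the current softlimits).
 */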
6628 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
6629 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
6630 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
6631 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
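/*
 * The /128 literals above express times in 1.28 us hardware units:
 * 7600000 / 128 = 59375 units * 1.28 us = 76 ms, and the up EI of
 * 66000 units * 1.28 us = 84.48 ms, matching the inline comments.
 */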
6633 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6636 I915_WRITE(GEN6_RP_CONTROL,
6637 GEN6_RP_MEDIA_TURBO |
6638 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6639 GEN6_RP_MEDIA_IS_GFX |
6641 GEN6_RP_UP_BUSY_AVG |
6642 GEN6_RP_DOWN_IDLE_AVG);
6644 /* 6: Ring frequency + overclocking (our driver does this later) */
6646 reset_rps(dev_priv, gen6_set_rps);
6648 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6651 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
6653 struct intel_engine_cs *engine;
6654 enum intel_engine_id id;
6655 u32 rc6vids, rc6_mask = 0;
6660 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6662 /* Here begins a magic sequence of register writes to enable
6663 * auto-downclocking.
6665 * Perhaps there might be some value in exposing these to
6668 I915_WRITE(GEN6_RC_STATE, 0);
6670 /* Clear the DBG now so we don't confuse earlier errors */
6671 gtfifodbg = I915_READ(GTFIFODBG);
6673 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
6674 I915_WRITE(GTFIFODBG, gtfifodbg);
6677 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6679 /* disable the counters and set deterministic thresholds */
6680 I915_WRITE(GEN6_RC_CONTROL, 0);
6682 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
6683 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
6684 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
6685 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6686 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6688 for_each_engine(engine, dev_priv, id)
6689 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6691 I915_WRITE(GEN6_RC_SLEEP, 0);
6692 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
6693 if (IS_IVYBRIDGE(dev_priv))
6694 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
6695 else
6696 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
6697 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
6698 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
6700 /* Check if we are enabling RC6 */
6701 rc6_mode = intel_enable_rc6();
6702 if (rc6_mode & INTEL_RC6_ENABLE)
6703 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
6705 /* We don't use those on Haswell */
6706 if (!IS_HASWELL(dev_priv)) {
6707 if (rc6_mode & INTEL_RC6p_ENABLE)
6708 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
6710 if (rc6_mode & INTEL_RC6pp_ENABLE)
6711 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
6714 intel_print_rc6_info(dev_priv, rc6_mask);
6716 I915_WRITE(GEN6_RC_CONTROL,
6717 rc6_mask |
6718 GEN6_RC_CTL_EI_MODE(1) |
6719 GEN6_RC_CTL_HW_ENABLE);
6721 /* Power down if completely idle for over 50ms */
6722 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
6723 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6725 reset_rps(dev_priv, gen6_set_rps);
6728 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
6729 if (IS_GEN6(dev_priv) && ret) {
6730 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
6731 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
6732 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
6733 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
6734 rc6vids &= 0xffff00;
6735 rc6vids |= GEN6_ENCODE_RC6_VID(450);
6736 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
6738 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
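/*
 * Worked example (illustrative VID; assumes the linear 5 mV/step,
 * 245 mV-offset mapping of the GEN6_{EN,DE}CODE_RC6_VID helpers): a
 * BIOS-programmed VID of 0x1e decodes to 30 * 5 + 245 = 395 mV, below
 * the 450 mV floor, so 450 mV is re-encoded as (450 - 245) / 5 = 41
 * and written back.
 */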
6741 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6744 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
6747 unsigned int gpu_freq;
6748 unsigned int max_ia_freq, min_ring_freq;
6749 unsigned int max_gpu_freq, min_gpu_freq;
6750 int scaling_factor = 180;
6751 struct cpufreq_policy *policy;
6753 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6755 policy = cpufreq_cpu_get(0);
6757 max_ia_freq = policy->cpuinfo.max_freq;
6758 cpufreq_cpu_put(policy);
6761 * Default to measured freq if none found, PCU will ensure we
6764 max_ia_freq = tsc_khz;
6767 /* Convert from kHz to MHz */
6768 max_ia_freq /= 1000;
6770 min_ring_freq = I915_READ(DCLK) & 0xf;
6771 /* convert DDR frequency from units of 266.6MHz to bandwidth */
6772 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
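/*
 * Worked example: a DCLK field of 3 means 3 units of 266.6 MHz
 * (800 MHz DDR); mult_frac(3, 8, 3) = 8 re-expresses the same 800 MHz
 * in 100 MHz units for the ring_freq math below, since
 * 266.6 MHz * 3 / 8 = 100 MHz.
 */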
6774 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6775 /* Convert GT frequency to 50 MHz units */
6776 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
6777 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
6779 min_gpu_freq = dev_priv->rps.min_freq;
6780 max_gpu_freq = dev_priv->rps.max_freq;
6784 * For each potential GPU frequency, load a ring frequency we'd like
6785 * to use for memory access. We do this by specifying the IA frequency
6786 * the PCU should use as a reference to determine the ring frequency.
6788 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
6789 int diff = max_gpu_freq - gpu_freq;
6790 unsigned int ia_freq = 0, ring_freq = 0;
6792 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6794 * ring_freq = 2 * GT. ring_freq is in 100MHz units
6795 * No floor required for ring frequency on SKL.
6797 ring_freq = gpu_freq;
6798 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
6799 /* max(2 * GT, DDR). NB: GT is 50MHz units */
6800 ring_freq = max(min_ring_freq, gpu_freq);
6801 } else if (IS_HASWELL(dev_priv)) {
6802 ring_freq = mult_frac(gpu_freq, 5, 4);
6803 ring_freq = max(min_ring_freq, ring_freq);
6804 /* leave ia_freq as the default, chosen by cpufreq */
6806 /* On older processors, there is no separate ring
6807 * clock domain, so in order to boost the bandwidth
6808 * of the ring, we need to upclock the CPU (ia_freq).
6810 * For GPU frequencies less than 750MHz,
6811 * just use the lowest ring freq.
6813 if (gpu_freq < min_freq)
6816 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
6817 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
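/*
 * Worked example for this pre-HSW branch (illustrative values): with
 * max_ia_freq = 3400 MHz, scaling_factor = 180 and diff = 4 bins below
 * max, ia_freq = 3400 - (4 * 180) / 2 = 3040 MHz, which
 * DIV_ROUND_CLOSEST(, 100) turns into a pcode ratio of 30 -- i.e. the
 * IA request drops by 90 MHz per 50 MHz GT bin below maximum.
 */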
6820 sandybridge_pcode_write(dev_priv,
6821 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
6822 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
6823 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
6828 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
6832 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
6834 switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
6836 /* (2 * 4) config */
6837 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
6840 /* (2 * 6) config */
6841 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
6844 /* (2 * 8) config */
6846 /* Setting (2 * 8) Min RP0 for any other combination */
6847 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
6851 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
6856 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
6860 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
6861 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
6866 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
6870 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
6871 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
6876 static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
6880 val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
6881 rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
6882 FB_GFX_FREQ_FUSE_MASK);
6887 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
6891 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
6893 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
6898 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
6902 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
6904 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
6906 rp0 = min_t(u32, rp0, 0xea);
6911 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
6915 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
6916 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
6917 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
6918 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
6923 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
6927 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
6929 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
6930 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
6931 * a BYT-M B0 the above register contains 0xbf. Moreover, when setting
6932 * a frequency, the Punit will not allow values below 0xc0. Clamp it to
6933 * 0xc0 to make sure it matches what the Punit accepts.
6935 return max_t(u32, val, 0xc0);
6938 /* Check that the pctx buffer wasn't moved from under us. */
6939 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
6941 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
6943 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
6944 dev_priv->vlv_pctx->stolen->start);
6948 /* Check that the pcbr address is not empty. */
6949 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
6951 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
6953 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
6956 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
6958 struct i915_ggtt *ggtt = &dev_priv->ggtt;
6959 unsigned long pctx_paddr, paddr;
6961 int pctx_size = 32*1024;
6963 pcbr = I915_READ(VLV_PCBR);
6964 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
6965 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
6966 paddr = (dev_priv->mm.stolen_base +
6967 (ggtt->stolen_size - pctx_size));
6969 pctx_paddr = (paddr & (~4095));
6970 I915_WRITE(VLV_PCBR, pctx_paddr);
6973 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
6976 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
6978 struct drm_i915_gem_object *pctx;
6979 unsigned long pctx_paddr;
6981 int pctx_size = 24*1024;
6983 pcbr = I915_READ(VLV_PCBR);
6985 /* BIOS set it up already, grab the pre-alloc'd space */
6988 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
6989 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
6991 I915_GTT_OFFSET_NONE,
6996 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
6999 * From the Gunit register HAS:
7000 * The Gfx driver is expected to program this register and ensure
7001 * proper allocation within Gfx stolen memory. For example, this
7002 * register should be programmed such that the PCBR range does not
7003 * overlap with other ranges, such as the frame buffer, protected
7004 * memory, or any other relevant ranges.
7006 pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
7008 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
7012 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
7013 I915_WRITE(VLV_PCBR, pctx_paddr);
7016 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
7017 dev_priv->vlv_pctx = pctx;
7020 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
7022 if (WARN_ON(!dev_priv->vlv_pctx))
7023 return;
7025 i915_gem_object_put(dev_priv->vlv_pctx);
7026 dev_priv->vlv_pctx = NULL;
7029 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
7031 dev_priv->rps.gpll_ref_freq =
7032 vlv_get_cck_clock(dev_priv, "GPLL ref",
7033 CCK_GPLL_CLOCK_CONTROL,
7034 dev_priv->czclk_freq);
7036 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
7037 dev_priv->rps.gpll_ref_freq);
7040 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
7044 valleyview_setup_pctx(dev_priv);
7046 vlv_init_gpll_ref_freq(dev_priv);
7048 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7049 switch ((val >> 6) & 3) {
7052 dev_priv->mem_freq = 800;
7055 dev_priv->mem_freq = 1066;
7058 dev_priv->mem_freq = 1333;
7061 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
7063 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
7064 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
7065 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7066 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
7067 dev_priv->rps.max_freq);
7069 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
7070 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7071 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
7072 dev_priv->rps.efficient_freq);
7074 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
7075 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
7076 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
7077 dev_priv->rps.rp1_freq);
7079 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
7080 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7081 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
7082 dev_priv->rps.min_freq);
7085 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
7089 cherryview_setup_pctx(dev_priv);
7091 vlv_init_gpll_ref_freq(dev_priv);
7093 mutex_lock(&dev_priv->sb_lock);
7094 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
7095 mutex_unlock(&dev_priv->sb_lock);
7097 switch ((val >> 2) & 0x7) {
7099 dev_priv->mem_freq = 2000;
7102 dev_priv->mem_freq = 1600;
7105 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
7107 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
7108 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
7109 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7110 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
7111 dev_priv->rps.max_freq);
7113 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
7114 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7115 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
7116 dev_priv->rps.efficient_freq);
7118 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
7119 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
7120 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
7121 dev_priv->rps.rp1_freq);
7123 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
7124 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7125 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
7126 dev_priv->rps.min_freq);
7128 WARN_ONCE((dev_priv->rps.max_freq |
7129 dev_priv->rps.efficient_freq |
7130 dev_priv->rps.rp1_freq |
7131 dev_priv->rps.min_freq) & 1,
7132 "Odd GPU freq values\n");
7135 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
7137 valleyview_cleanup_pctx(dev_priv);
7140 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
7142 struct intel_engine_cs *engine;
7143 enum intel_engine_id id;
7144 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
7146 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7148 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
7149 GT_FIFO_FREE_ENTRIES_CHV);
7151 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
7153 I915_WRITE(GTFIFODBG, gtfifodbg);
7156 cherryview_check_pctx(dev_priv);
7158 /* 1a & 1b: Get forcewake during program sequence. Although the driver
7159 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
7160 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
7162 /* Disable RC states. */
7163 I915_WRITE(GEN6_RC_CONTROL, 0);
7165 /* 2a: Program RC6 thresholds.*/
7166 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
7167 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
7168 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
7170 for_each_engine(engine, dev_priv, id)
7171 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7172 I915_WRITE(GEN6_RC_SLEEP, 0);
7174 /* TO threshold set to 500 us (0x186 * 1.28 us) */
7175 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
7177 /* allows RC6 residency counter to work */
7178 I915_WRITE(VLV_COUNTER_CONTROL,
7179 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
7180 VLV_MEDIA_RC6_COUNT_EN |
7181 VLV_RENDER_RC6_COUNT_EN));
7183 /* For now we assume BIOS is allocating and populating the PCBR */
7184 pcbr = I915_READ(VLV_PCBR);
7187 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
7188 (pcbr >> VLV_PCBR_ADDR_SHIFT))
7189 rc6_mode = GEN7_RC_CTL_TO_MODE;
7191 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
7193 /* 4 Program defaults and thresholds for RPS*/
7194 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
7195 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
7196 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
7197 I915_WRITE(GEN6_RP_UP_EI, 66000);
7198 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
7200 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7203 I915_WRITE(GEN6_RP_CONTROL,
7204 GEN6_RP_MEDIA_HW_NORMAL_MODE |
7205 GEN6_RP_MEDIA_IS_GFX |
7207 GEN6_RP_UP_BUSY_AVG |
7208 GEN6_RP_DOWN_IDLE_AVG);
7210 /* Setting Fixed Bias */
7211 val = VLV_OVERRIDE_EN |
7213 CHV_BIAS_CPU_50_SOC_50;
7214 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
7216 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7218 /* RPS code assumes GPLL is used */
7219 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
7221 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
7222 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
7224 reset_rps(dev_priv, valleyview_set_rps);
7226 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
7229 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
7231 struct intel_engine_cs *engine;
7232 enum intel_engine_id id;
7233 u32 gtfifodbg, val, rc6_mode = 0;
7235 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7237 valleyview_check_pctx(dev_priv);
7239 gtfifodbg = I915_READ(GTFIFODBG);
7241 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
7243 I915_WRITE(GTFIFODBG, gtfifodbg);
7246 /* If VLV, forcewake all wells; else redirect to the regular path */
7247 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
7249 /* Disable RC states. */
7250 I915_WRITE(GEN6_RC_CONTROL, 0);
7252 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
7253 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
7254 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
7255 I915_WRITE(GEN6_RP_UP_EI, 66000);
7256 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
7258 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7260 I915_WRITE(GEN6_RP_CONTROL,
7261 GEN6_RP_MEDIA_TURBO |
7262 GEN6_RP_MEDIA_HW_NORMAL_MODE |
7263 GEN6_RP_MEDIA_IS_GFX |
7265 GEN6_RP_UP_BUSY_AVG |
7266 GEN6_RP_DOWN_IDLE_CONT);
7268 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
7269 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
7270 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
7272 for_each_engine(engine, dev_priv, id)
7273 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7275 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
7277 /* allows RC6 residency counter to work */
7278 I915_WRITE(VLV_COUNTER_CONTROL,
7279 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
7280 VLV_MEDIA_RC0_COUNT_EN |
7281 VLV_RENDER_RC0_COUNT_EN |
7282 VLV_MEDIA_RC6_COUNT_EN |
7283 VLV_RENDER_RC6_COUNT_EN));
7285 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
7286 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
7288 intel_print_rc6_info(dev_priv, rc6_mode);
7290 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
7292 /* Setting Fixed Bias */
7293 val = VLV_OVERRIDE_EN |
7295 VLV_BIAS_CPU_125_SOC_875;
7296 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
7298 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7300 /* RPS code assumes GPLL is used */
7301 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
7303 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
7304 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
7306 reset_rps(dev_priv, valleyview_set_rps);
7308 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
7311 static unsigned long intel_pxfreq(u32 vidfreq)
7314 int div = (vidfreq & 0x3f0000) >> 16;
7315 int post = (vidfreq & 0x3000) >> 12;
7316 int pre = (vidfreq & 0x7);
7321 freq = ((div * 133333) / ((1<<post) * pre));
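/*
 * Worked example (illustrative register value): vidfreq = 0x00121003
 * gives div = 0x12 (18), post = 1, pre = 3, so
 * freq = 18 * 133333 / ((1 << 1) * 3) = 399999, i.e. roughly 400 MHz
 * if the 133333 constant is read as a 133.33 MHz reference in kHz.
 */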
7326 static const struct cparams {
7332 { 1, 1333, 301, 28664 },
7333 { 1, 1066, 294, 24460 },
7334 { 1, 800, 294, 25192 },
7335 { 0, 1333, 276, 27605 },
7336 { 0, 1066, 276, 27605 },
7337 { 0, 800, 231, 23784 },
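/*
 * The four columns are (i, t, m, c): i and t are matched against
 * ips.c_m and ips.r_t in __i915_chipset_val() below, and the selected
 * (m, c) pair feeds the linear power estimate ret = (m * diff + c) / 10.
 */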
7340 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
7342 u64 total_count, diff, ret;
7343 u32 count1, count2, count3, m = 0, c = 0;
7344 unsigned long now = jiffies_to_msecs(jiffies), diff1;
7347 lockdep_assert_held(&mchdev_lock);
7349 diff1 = now - dev_priv->ips.last_time1;
7351 /* Prevent division-by-zero if we are asking too fast.
7352 * Also, we don't get interesting results if we are polling
7353 * faster than once in 10ms, so just return the saved value
7357 return dev_priv->ips.chipset_power;
7359 count1 = I915_READ(DMIEC);
7360 count2 = I915_READ(DDREC);
7361 count3 = I915_READ(CSIEC);
7363 total_count = count1 + count2 + count3;
7365 /* FIXME: handle per-counter overflow */
7366 if (total_count < dev_priv->ips.last_count1) {
7367 diff = ~0UL - dev_priv->ips.last_count1;
7368 diff += total_count;
7370 diff = total_count - dev_priv->ips.last_count1;
7373 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
7374 if (cparams[i].i == dev_priv->ips.c_m &&
7375 cparams[i].t == dev_priv->ips.r_t) {
7382 diff = div_u64(diff, diff1);
7383 ret = ((m * diff) + c);
7384 ret = div_u64(ret, 10);
7386 dev_priv->ips.last_count1 = total_count;
7387 dev_priv->ips.last_time1 = now;
7389 dev_priv->ips.chipset_power = ret;
7394 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
7398 if (INTEL_INFO(dev_priv)->gen != 5)
7399 return 0;
7401 spin_lock_irq(&mchdev_lock);
7403 val = __i915_chipset_val(dev_priv);
7405 spin_unlock_irq(&mchdev_lock);
7410 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
7412 unsigned long m, x, b;
7415 tsfs = I915_READ(TSFS);
7417 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
7418 x = I915_READ8(TR1);
7420 b = tsfs & TSFS_INTR_MASK;
7422 return ((m * x) / 127) - b;
7425 static int _pxvid_to_vd(u8 pxvid)
7430 if (pxvid >= 8 && pxvid < 31)
7433 return (pxvid + 2) * 125;
7436 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
7438 const int vd = _pxvid_to_vd(pxvid);
7439 const int vm = vd - 1125;
7441 if (INTEL_INFO(dev_priv)->is_mobile)
7442 return vm > 0 ? vm : 0;
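/*
 * Worked example (illustrative): pxvid = 7 maps to (7 + 2) * 125 =
 * 1125, exactly the baseline subtracted above for mobile parts --
 * i.e. on mobile a pxvid of 7 reads back as an extvid of 0.
 */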
7447 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
7449 u64 now, diff, diffms;
7452 lockdep_assert_held(&mchdev_lock);
7454 now = ktime_get_raw_ns();
7455 diffms = now - dev_priv->ips.last_time2;
7456 do_div(diffms, NSEC_PER_MSEC);
7458 /* Don't divide by 0 */
7462 count = I915_READ(GFXEC);
7464 if (count < dev_priv->ips.last_count2) {
7465 diff = ~0UL - dev_priv->ips.last_count2;
7468 diff = count - dev_priv->ips.last_count2;
7471 dev_priv->ips.last_count2 = count;
7472 dev_priv->ips.last_time2 = now;
7474 /* More magic constants... */
7476 diff = div_u64(diff, diffms * 10);
7477 dev_priv->ips.gfx_power = diff;
7480 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
7482 if (INTEL_INFO(dev_priv)->gen != 5)
7483 return;
7485 spin_lock_irq(&mchdev_lock);
7487 __i915_update_gfx_val(dev_priv);
7489 spin_unlock_irq(&mchdev_lock);
7492 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
7494 unsigned long t, corr, state1, corr2, state2;
7497 lockdep_assert_held(&mchdev_lock);
7499 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
7500 pxvid = (pxvid >> 24) & 0x7f;
7501 ext_v = pvid_to_extvid(dev_priv, pxvid);
7505 t = i915_mch_val(dev_priv);
7507 /* Revel in the empirically derived constants */
7509 /* Correction factor in 1/100000 units */
7511 corr = ((t * 2349) + 135940);
7513 corr = ((t * 964) + 29317);
7515 corr = ((t * 301) + 1004);
7517 corr = corr * ((150142 * state1) / 10000 - 78642);
7519 corr2 = (corr * dev_priv->ips.corr);
7521 state2 = (corr2 * state1) / 10000;
7522 state2 /= 100; /* convert to mW */
7524 __i915_update_gfx_val(dev_priv);
7526 return dev_priv->ips.gfx_power + state2;
7529 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
7533 if (INTEL_INFO(dev_priv)->gen != 5)
7534 return 0;
7536 spin_lock_irq(&mchdev_lock);
7538 val = __i915_gfx_val(dev_priv);
7540 spin_unlock_irq(&mchdev_lock);
7546 * i915_read_mch_val - return value for IPS use
7548 * Calculate and return a value for the IPS driver to use when deciding whether
7549 * we have thermal and power headroom to increase CPU or GPU power budget.
7551 unsigned long i915_read_mch_val(void)
7553 struct drm_i915_private *dev_priv;
7554 unsigned long chipset_val, graphics_val, ret = 0;
7556 spin_lock_irq(&mchdev_lock);
7559 dev_priv = i915_mch_dev;
7561 chipset_val = __i915_chipset_val(dev_priv);
7562 graphics_val = __i915_gfx_val(dev_priv);
7564 ret = chipset_val + graphics_val;
7567 spin_unlock_irq(&mchdev_lock);
7571 EXPORT_SYMBOL_GPL(i915_read_mch_val);
7574 * i915_gpu_raise - raise GPU frequency limit
7576 * Raise the limit; IPS indicates we have thermal headroom.
7578 bool i915_gpu_raise(void)
7580 struct drm_i915_private *dev_priv;
7583 spin_lock_irq(&mchdev_lock);
7584 if (!i915_mch_dev) {
7588 dev_priv = i915_mch_dev;
7590 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
7591 dev_priv->ips.max_delay--;
7594 spin_unlock_irq(&mchdev_lock);
7598 EXPORT_SYMBOL_GPL(i915_gpu_raise);
7601 * i915_gpu_lower - lower GPU frequency limit
7603 * IPS indicates we're close to a thermal limit, so throttle back the GPU
7604 * frequency maximum.
7606 bool i915_gpu_lower(void)
7608 struct drm_i915_private *dev_priv;
7611 spin_lock_irq(&mchdev_lock);
7612 if (!i915_mch_dev) {
7616 dev_priv = i915_mch_dev;
7618 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
7619 dev_priv->ips.max_delay++;
7622 spin_unlock_irq(&mchdev_lock);
7626 EXPORT_SYMBOL_GPL(i915_gpu_lower);
7629 * i915_gpu_busy - indicate GPU business to IPS
7631 * Tell the IPS driver whether or not the GPU is busy.
7633 bool i915_gpu_busy(void)
7637 spin_lock_irq(&mchdev_lock);
7639 ret = i915_mch_dev->gt.awake;
7640 spin_unlock_irq(&mchdev_lock);
7644 EXPORT_SYMBOL_GPL(i915_gpu_busy);
7647 * i915_gpu_turbo_disable - disable graphics turbo
7649 * Disable graphics turbo by resetting the max frequency and setting the
7650 * current frequency to the default.
7652 bool i915_gpu_turbo_disable(void)
7654 struct drm_i915_private *dev_priv;
7657 spin_lock_irq(&mchdev_lock);
7658 if (!i915_mch_dev) {
7662 dev_priv = i915_mch_dev;
7664 dev_priv->ips.max_delay = dev_priv->ips.fstart;
7666 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
7670 spin_unlock_irq(&mchdev_lock);
7674 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
7677 * Tells the intel_ips driver that the i915 driver is now loaded, if
7678 * IPS got loaded first.
7680 * This awkward dance is so that neither module has to depend on the
7681 * other in order for IPS to do the appropriate communication of
7682 * GPU turbo limits to i915.
7685 ips_ping_for_i915_load(void)
7689 link = symbol_get(ips_link_to_i915_driver);
7692 symbol_put(ips_link_to_i915_driver);
7696 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
7698 /* We only register the i915 ips part with intel-ips once everything is
7699 * set up, to avoid intel-ips sneaking in and reading bogus values. */
7700 spin_lock_irq(&mchdev_lock);
7701 i915_mch_dev = dev_priv;
7702 spin_unlock_irq(&mchdev_lock);
7704 ips_ping_for_i915_load();
7707 void intel_gpu_ips_teardown(void)
7709 spin_lock_irq(&mchdev_lock);
7710 i915_mch_dev = NULL;
7711 spin_unlock_irq(&mchdev_lock);
7714 static void intel_init_emon(struct drm_i915_private *dev_priv)
7720 /* Disable to program */
7724 /* Program energy weights for various events */
7725 I915_WRITE(SDEW, 0x15040d00);
7726 I915_WRITE(CSIEW0, 0x007f0000);
7727 I915_WRITE(CSIEW1, 0x1e220004);
7728 I915_WRITE(CSIEW2, 0x04000004);
7730 for (i = 0; i < 5; i++)
7731 I915_WRITE(PEW(i), 0);
7732 for (i = 0; i < 3; i++)
7733 I915_WRITE(DEW(i), 0);
7735 /* Program P-state weights to account for frequency power adjustment */
7736 for (i = 0; i < 16; i++) {
7737 u32 pxvidfreq = I915_READ(PXVFREQ(i));
7738 unsigned long freq = intel_pxfreq(pxvidfreq);
7739 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
7744 val *= (freq / 1000);
7746 val /= (127*127*900);
7748 DRM_ERROR("bad pxval: %ld\n", val);
7751 /* Render standby states get 0 weight */
7755 for (i = 0; i < 4; i++) {
7756 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
7757 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
7758 I915_WRITE(PXW(i), val);
7761 /* Adjust magic regs to magic values (more experimental results) */
7762 I915_WRITE(OGW0, 0);
7763 I915_WRITE(OGW1, 0);
7764 I915_WRITE(EG0, 0x00007f00);
7765 I915_WRITE(EG1, 0x0000000e);
7766 I915_WRITE(EG2, 0x000e0000);
7767 I915_WRITE(EG3, 0x68000300);
7768 I915_WRITE(EG4, 0x42000000);
7769 I915_WRITE(EG5, 0x00140031);
7773 for (i = 0; i < 8; i++)
7774 I915_WRITE(PXWL(i), 0);
7776 /* Enable PMON + select events */
7777 I915_WRITE(ECR, 0x80000019);
7779 lcfuse = I915_READ(LCFUSE02);
7781 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
7784 static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
7786 return !I915_READ(GEN8_RC6_CTX_INFO);
7789 static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
7791 if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
7792 return;
7794 if (i915_rc6_ctx_corrupted(i915)) {
7795 DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
7796 i915->rps.ctx_corrupted = true;
7797 intel_runtime_pm_get(i915);
7801 static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
7803 if (i915->rps.ctx_corrupted) {
7804 intel_runtime_pm_put(i915);
7805 i915->rps.ctx_corrupted = false;
7810 * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
7811 * @i915: i915 device
7813 * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
7815 void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
7817 if (i915->rps.ctx_corrupted)
7818 intel_runtime_pm_put(i915);
7822 * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
7823 * @i915: i915 device
7825 * Perform any steps needed to re-init the RC6 CTX WA after system resume.
7827 void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
7829 if (!i915->rps.ctx_corrupted)
7830 return;
7832 if (i915_rc6_ctx_corrupted(i915)) {
7833 intel_runtime_pm_get(i915);
7837 DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
7838 i915->rps.ctx_corrupted = false;
7841 static void intel_disable_rc6(struct drm_i915_private *dev_priv);
7844 * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
7845 * @i915: i915 device
7847 * Check if an RC6 CTX corruption has happened since the last check and if so
7848 * disable RC6 and runtime power management.
7850 * Return false if no context corruption has happened since the last call of
7851 * this function, true otherwise.
7853 bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
7855 if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
7856 return false;
7858 if (i915->rps.ctx_corrupted)
7859 return false;
7861 if (!i915_rc6_ctx_corrupted(i915))
7862 return false;
7864 DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
7866 intel_disable_rc6(i915);
7867 i915->rps.ctx_corrupted = true;
7868 intel_runtime_pm_get_noresume(i915);
7873 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
7876 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
7879 if (!i915.enable_rc6) {
7880 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
7881 intel_runtime_pm_get(dev_priv);
7884 mutex_lock(&dev_priv->drm.struct_mutex);
7885 mutex_lock(&dev_priv->rps.hw_lock);
7887 i915_rc6_ctx_wa_init(dev_priv);
7889 /* Initialize RPS limits (for userspace) */
7890 if (IS_CHERRYVIEW(dev_priv))
7891 cherryview_init_gt_powersave(dev_priv);
7892 else if (IS_VALLEYVIEW(dev_priv))
7893 valleyview_init_gt_powersave(dev_priv);
7894 else if (INTEL_GEN(dev_priv) >= 6)
7895 gen6_init_rps_frequencies(dev_priv);
7897 /* Derive initial user preferences/limits from the hardware limits */
7898 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
7899 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
7901 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
7902 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
7904 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
7905 dev_priv->rps.min_freq_softlimit =
7907 dev_priv->rps.efficient_freq,
7908 intel_freq_opcode(dev_priv, 450));
7910 /* After setting max-softlimit, find the overclock max freq */
7911 if (IS_GEN6(dev_priv) ||
7912 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
7915 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, ¶ms);
7916 if (params & BIT(31)) { /* OC supported */
7917 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
7918 (dev_priv->rps.max_freq & 0xff) * 50,
7919 (params & 0xff) * 50);
7920 dev_priv->rps.max_freq = params & 0xff;
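/*
 * Worked decode (illustrative pcode reply): params & 0xff = 0x2c (44)
 * raises max_freq to 44, i.e. 44 * 50 = 2200 MHz in the units used by
 * the debug message above.
 */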
7924 /* Finally allow us to boost to max by default */
7925 dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
7927 mutex_unlock(&dev_priv->rps.hw_lock);
7928 mutex_unlock(&dev_priv->drm.struct_mutex);
7930 intel_autoenable_gt_powersave(dev_priv);
7933 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
7935 if (IS_VALLEYVIEW(dev_priv))
7936 valleyview_cleanup_gt_powersave(dev_priv);
7938 i915_rc6_ctx_wa_cleanup(dev_priv);
7940 if (!i915.enable_rc6)
7941 intel_runtime_pm_put(dev_priv);
7945 * intel_suspend_gt_powersave - suspend PM work and helper threads
7946 * @dev_priv: i915 device
7948 * We don't want to disable RC6 or other features here, we just want
7949 * to make sure any work we've queued has finished and won't bother
7950 * us while we're suspended.
7952 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
7954 if (INTEL_GEN(dev_priv) < 6)
7955 return;
7957 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
7958 intel_runtime_pm_put(dev_priv);
7960 /* gen6_rps_idle() will be called later to disable interrupts */
7963 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
7965 dev_priv->rps.enabled = true; /* force disabling */
7966 intel_disable_gt_powersave(dev_priv);
7968 gen6_reset_rps_interrupts(dev_priv);
7971 static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
7973 if (INTEL_GEN(dev_priv) >= 9)
7974 gen9_disable_rc6(dev_priv);
7975 else if (IS_CHERRYVIEW(dev_priv))
7976 cherryview_disable_rc6(dev_priv);
7977 else if (IS_VALLEYVIEW(dev_priv))
7978 valleyview_disable_rc6(dev_priv);
7979 else if (INTEL_GEN(dev_priv) >= 6)
7980 gen6_disable_rc6(dev_priv);
7983 static void intel_disable_rc6(struct drm_i915_private *dev_priv)
7985 mutex_lock(&dev_priv->rps.hw_lock);
7986 __intel_disable_rc6(dev_priv);
7987 mutex_unlock(&dev_priv->rps.hw_lock);
7990 static void intel_disable_rps(struct drm_i915_private *dev_priv)
7992 if (INTEL_GEN(dev_priv) >= 9)
7993 gen9_disable_rps(dev_priv);
7994 else if (INTEL_GEN(dev_priv) >= 6)
7995 gen6_disable_rps(dev_priv);
7996 else if (IS_IRONLAKE_M(dev_priv))
7997 ironlake_disable_drps(dev_priv);
8000 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
8002 if (!READ_ONCE(dev_priv->rps.enabled))
8003 return;
8005 mutex_lock(&dev_priv->rps.hw_lock);
8007 __intel_disable_rc6(dev_priv);
8008 intel_disable_rps(dev_priv);
8010 dev_priv->rps.enabled = false;
8012 mutex_unlock(&dev_priv->rps.hw_lock);
8015 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
8017 /* We shouldn't be disabling as we submit, so this should be less
8018 * racy than it appears!
8020 if (READ_ONCE(dev_priv->rps.enabled))
8021 return;
8023 /* Powersaving is controlled by the host when inside a VM */
8024 if (intel_vgpu_active(dev_priv))
8025 return;
8027 mutex_lock(&dev_priv->rps.hw_lock);
8029 if (IS_CHERRYVIEW(dev_priv)) {
8030 cherryview_enable_rps(dev_priv);
8031 } else if (IS_VALLEYVIEW(dev_priv)) {
8032 valleyview_enable_rps(dev_priv);
8033 } else if (INTEL_GEN(dev_priv) >= 9) {
8034 gen9_enable_rc6(dev_priv);
8035 gen9_enable_rps(dev_priv);
8036 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
8037 gen6_update_ring_freq(dev_priv);
8038 } else if (IS_BROADWELL(dev_priv)) {
8039 gen8_enable_rps(dev_priv);
8040 gen6_update_ring_freq(dev_priv);
8041 } else if (INTEL_GEN(dev_priv) >= 6) {
8042 gen6_enable_rps(dev_priv);
8043 gen6_update_ring_freq(dev_priv);
8044 } else if (IS_IRONLAKE_M(dev_priv)) {
8045 ironlake_enable_drps(dev_priv);
8046 intel_init_emon(dev_priv);
8049 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
8050 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
8052 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
8053 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
8055 dev_priv->rps.enabled = true;
8056 mutex_unlock(&dev_priv->rps.hw_lock);
8059 static void __intel_autoenable_gt_powersave(struct work_struct *work)
8061 struct drm_i915_private *dev_priv =
8062 container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
8063 struct intel_engine_cs *rcs;
8064 struct drm_i915_gem_request *req;
8066 if (READ_ONCE(dev_priv->rps.enabled))
8069 rcs = dev_priv->engine[RCS];
8070 if (rcs->last_retired_context)
8073 if (!rcs->init_context)
8076 mutex_lock(&dev_priv->drm.struct_mutex);
8078 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
8082 if (!i915.enable_execlists && i915_switch_context(req) == 0)
8083 rcs->init_context(req);
8085 /* Mark the device busy, calling intel_enable_gt_powersave() */
8086 i915_add_request(req);
8089 mutex_unlock(&dev_priv->drm.struct_mutex);
8091 intel_runtime_pm_put(dev_priv);
8094 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
8096 if (READ_ONCE(dev_priv->rps.enabled))
8097 return;
8099 if (IS_IRONLAKE_M(dev_priv)) {
8100 ironlake_enable_drps(dev_priv);
8101 intel_init_emon(dev_priv);
8102 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
8104 * PCU communication is slow and this doesn't need to be
8105 * done at any specific time, so do this out of our fast path
8106 * to make resume and init faster.
8108 * We depend on the HW RC6 power context save/restore
8109 * mechanism when entering D3 through runtime PM suspend. So
8110 * disable RPM until RPS/RC6 is properly setup. We can only
8111 * get here via the driver load/system resume/runtime resume
8112 * paths, so the _noresume version is enough (and in case of
8113 * runtime resume it's necessary).
8115 if (queue_delayed_work(dev_priv->wq,
8116 &dev_priv->rps.autoenable_work,
8117 round_jiffies_up_relative(HZ)))
8118 intel_runtime_pm_get_noresume(dev_priv);
8122 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
8125 * On Ibex Peak and Cougar Point, we need to disable clock
8126 * gating for the panel power sequencer or it will fail to
8127 * start up when no ports are active.
8129 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8132 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
8136 for_each_pipe(dev_priv, pipe) {
8137 I915_WRITE(DSPCNTR(pipe),
8138 I915_READ(DSPCNTR(pipe)) |
8139 DISPPLANE_TRICKLE_FEED_DISABLE);
8141 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
8142 POSTING_READ(DSPSURF(pipe));
8146 static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
8148 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
8152 * WaFbcDisableDpfcClockGating:ilk
8154 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
8155 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
8156 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
8158 I915_WRITE(PCH_3DCGDIS0,
8159 MARIUNIT_CLOCK_GATE_DISABLE |
8160 SVSMUNIT_CLOCK_GATE_DISABLE);
8161 I915_WRITE(PCH_3DCGDIS1,
8162 VFMUNIT_CLOCK_GATE_DISABLE);
8165 * According to the spec the following bits should be set in
8166 * order to enable memory self-refresh:
8167 * The bit 22/21 of 0x42004
8168 * The bit 5 of 0x42020
8169 * The bit 15 of 0x45000
8171 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8172 (I915_READ(ILK_DISPLAY_CHICKEN2) |
8173 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
8174 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
8175 I915_WRITE(DISP_ARB_CTL,
8176 (I915_READ(DISP_ARB_CTL) |
8180 * Based on the document from hardware guys the following bits
8181 * should be set unconditionally in order to enable FBC.
8182 * The bit 22 of 0x42000
8183 * The bit 22 of 0x42004
8184 * The bit 7,8,9 of 0x42020.
8186 if (IS_IRONLAKE_M(dev_priv)) {
8187 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
8188 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8189 I915_READ(ILK_DISPLAY_CHICKEN1) |
8191 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8192 I915_READ(ILK_DISPLAY_CHICKEN2) |
8196 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
8198 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8199 I915_READ(ILK_DISPLAY_CHICKEN2) |
8200 ILK_ELPIN_409_SELECT);
8201 I915_WRITE(_3D_CHICKEN2,
8202 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
8203 _3D_CHICKEN2_WM_READ_PIPELINED);
8205 /* WaDisableRenderCachePipelinedFlush:ilk */
8206 I915_WRITE(CACHE_MODE_0,
8207 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
8209 /* WaDisable_RenderCache_OperationalFlush:ilk */
8210 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
8212 g4x_disable_trickle_feed(dev_priv);
8214 ibx_init_clock_gating(dev_priv);
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		/* Undo lpt_init_clock_gating() so the PCH can power down */
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = I915_READ(GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	I915_WRITE(GEN8_L3SQCREG1, val);

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
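/*
 * gen8_set_l3sqc_credits() usage (values taken from the callers below):
 * Broadwell programs 30 general / 2 high priority credits
 * (WaProgramL3SqcReg1Default:bdw), Cherryview programs 38 / 2
 * (WaProgramL3SqcReg1Default:chv).
 */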
static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:bdw,chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}
static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t snpcr;

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}
static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}
void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN5(dev_priv))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_GEN4(dev_priv)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev_priv)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev_priv)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
	       I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}
static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
	       I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
				 mbox, __builtin_return_address(0));
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 500, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
			  mbox, __builtin_return_address(0));
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
				 mbox, __builtin_return_address(0), status);
		return status;
	}

	return 0;
}
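/*
 * Usage sketch (mirrors callers elsewhere in this file, e.g. the RC6 VID
 * readout in the RPS setup code):
 *
 *	u32 rc6vids = 0;
 *	int ret;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
 *				     &rc6vids);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * Note the mailbox is bidirectional: *val seeds GEN6_PCODE_DATA before the
 * request is issued and carries the reply afterwards, which is why callers
 * must initialise it.
 */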
int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
				    u32 mbox, u32 val, int timeout_us)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
				 val, mbox, __builtin_return_address(0));
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 timeout_us, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
			  val, mbox, __builtin_return_address(0));
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
				 val, mbox, __builtin_return_address(0), status);
		return status;
	}

	return 0;
}
static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	u32 val = request;

	*status = sandybridge_pcode_read(dev_priv, mbox, &val);

	return *status || ((val & reply_mask) == reply);
}
/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms; if this times out, it is retried for another 50 ms
 * with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	return ret ? ret : status;
#undef COND
}
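/*
 * Usage sketch (modelled on the CDCLK change sequence in intel_cdclk.c):
 * ask PCODE to prepare for a cdclk change and poll until it reports ready:
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */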
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}
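/*
 * Worked example for the two conversions above, assuming
 * dev_priv->rps.gpll_ref_freq is in kHz (so the results are in MHz): on VLV
 * an opcode of 0xc0 encodes N = 0xc0 - 0xb7 = 9, i.e. 9 * gpll_ref_freq /
 * 1000 MHz. CHV rounds to even opcodes because the PCU works in CU2x
 * (double) frequency units, hence the extra factors of 2 in both directions.
 */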
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
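/*
 * Worked example, assuming GT_FREQUENCY_MULTIPLIER == 50 and
 * GEN9_FREQ_SCALER == 3 as defined in i915_reg.h: on HSW a ratio opcode of
 * 16 maps to 16 * 50 = 800 MHz, while on SKL+ the effective step is 50/3
 * MHz, so intel_gpu_freq(dev_priv, 18) = DIV_ROUND_CLOSEST(18 * 50, 3) =
 * 300 MHz. intel_freq_opcode() is the inverse mapping from MHz back to a
 * ratio opcode.
 */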
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};
static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req, NULL);

	i915_gem_request_put(req);
	kfree(boost);
}
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	/* May be called from hardirq context, so the allocation must be
	 * atomic and the actual boost deferred to a worker.
	 */
	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	atomic_set(&dev_priv->rps.num_waiters, 0);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
}
static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
			     const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/* The registers accessed do not need forcewake. We borrow the
	 * uncore lock to prevent concurrent access to the range register.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);

	/* vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	I915_WRITE_FW(VLV_COUNTER_CONTROL,
		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = I915_READ_FW(reg);
	do {
		tmp = upper;

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = I915_READ_FW(reg);

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = I915_READ_FW(reg);
	} while (upper != tmp && --loop);

	/* Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * now.
	 */

	spin_unlock_irq(&dev_priv->uncore.lock);

	return lower | (u64)upper << 8;
}
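/*
 * Worked example for the 40 bit reconstruction above: the high window
 * exposes counter bits 39:8 and the low window bits 31:0. For a counter
 * value of 0x123456789a we read upper == 0x12345678 and lower == 0x3456789a,
 * so lower | (u64)upper << 8 == 0x123456789a; the overlapping bits 31:8
 * agree whenever the retry loop exits cleanly.
 */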
u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	u64 time_hw, units, div;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		units = 1000;
		div = dev_priv->czclk_freq;

		time_hw = vlv_residency_raw(dev_priv, reg);
	} else if (IS_GEN9_LP(dev_priv)) {
		units = 1000;
		div = 1200;		/* 833.33ns */

		time_hw = I915_READ(reg);
	} else {
		units = 128000; /* 1.28us */
		div = 100000;

		time_hw = I915_READ(reg);
	}

	intel_runtime_pm_put(dev_priv);
	return DIV_ROUND_UP_ULL(time_hw * units, div);
}
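/*
 * Worked example for the conversion above: on big core parts the counter
 * ticks every 1.28us, so a raw reading of 1000000 ticks yields
 * DIV_ROUND_UP_ULL(1000000 * 128000, 100000) == 1280000us, i.e. 1.28s of
 * RC6 residency.
 */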