/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
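
/*
 * Illustrative example (not any platform's actual default): a platform
 * validated for RC6 and the deeper RC6p state, but not RC6pp, would use
 * the mask (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), leaving the RC6pp
 * bit clear.
 */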
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when DSI port PLL
	 * is off and an MMIO access is attempted by any privileged
	 * application, using batch buffers or any other means.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
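
/*
 * In the table below, the columns match the struct cxsr_latency fields as
 * consumed by intel_get_cxsr_latency() and pineview_update_wm(): is_desktop,
 * is_ddr3, fsb_freq, mem_freq, then the four self-refresh latencies in ns:
 * display_sr, cursor_sr, display_hpll_disable and cursor_hpll_disable.
 */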
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
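
/*
 * Each plane's FIFO start offset is a 9-bit value split across two
 * registers: the low 8 bits live in DSPARB (or DSPARB3 for pipe C) and
 * bit 8 lives in DSPARB2; the macro above stitches the two halves back
 * together, as can be seen in vlv_get_fifo_size() below.
 */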
static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Lets go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
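
/*
 * Worked example (illustrative numbers only): with a 100000 kHz pixel
 * clock, cpp = 4, latency_ns = 5000 and a 64-byte cacheline,
 * entries_required = (100000/1000 * 4 * 5000) / 1000 = 2000 bytes, which
 * rounds up to 32 cachelines; a 96-entry FIFO with guard_size 2 then
 * yields a watermark of 96 - (32 + 2) = 62.
 */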
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}
enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
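
/*
 * Worked example (illustrative numbers only): with latency = 30 (3us),
 * pixel_rate = 100000 kHz and pipe_htotal = 2000, the latency spans
 * (30 * 100000) / (2000 * 10000) = 0 complete lines, so the result is
 * (0 + 1) * 1920 * 4 = 7680 bytes, i.e. DIV_ROUND_UP(7680, 64) = 120
 * 64-byte FIFO lines.
 */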
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->base.visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->base.visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->base.visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->base.visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}
static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}
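
/*
 * Note the ordering in vlv_update_wm() above: the power-saving features
 * (DDR DVFS, PM5, CxSR) are switched off before new, lower watermarks are
 * written, and only switched (back) on after the writes, so the hardware
 * never runs a deep power-saving state against watermarks that don't yet
 * allow it.
 */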
#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev) || IS_I945GM(dev))
			cpp = 4;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
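
/*
 * Worked example (illustrative numbers only): with pixel_rate = 100000 kHz,
 * cpp = 4 and latency = 20 (2us in 0.1us units), 100000 * 4 * 20 = 8000000,
 * divided by 64 * 10000 and rounded up gives 13, plus the constant 2 gives
 * a method1 watermark of 15 FIFO lines.
 */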
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
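
/*
 * Reading the formula above: pri_val is in units of 64-byte FIFO lines, so
 * pri_val * 64 is bytes; dividing by one display line's worth of bytes
 * (horiz_pixels * cpp) converts the primary watermark into whole display
 * lines, with 2 lines of guard added on top.
 */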
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->base.visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
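
/*
 * Illustrative numbers only: on a single-pipe gen7 config (768-entry FIFO)
 * with sprites enabled, a level > 0 watermark under 5/6 partitioning gives
 * the sprite 768 * 5 / 6 = 640 entries and the primary 768 / 6 = 128,
 * while the default 1:1 split gives 384 each.
 */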
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
2059 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2061 const struct intel_atomic_state *intel_state =
2062 to_intel_atomic_state(cstate->base.state);
2063 const struct drm_display_mode *adjusted_mode =
2064 &cstate->base.adjusted_mode;
2065 u32 linetime, ips_linetime;
2067 if (!cstate->base.active)
2069 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2071 if (WARN_ON(intel_state->cdclk == 0))
2074 /* The WM are computed with base on how long it takes to fill a single
2075 * row at the given clock rate, multiplied by 8.
2077 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2078 adjusted_mode->crtc_clock);
2079 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2080 intel_state->cdclk);
2082 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2083 PIPE_WM_LINETIME_TIME(linetime);
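/*
 * Illustrative arithmetic for the linetime fields above (example values,
 * not taken from any spec): a 1080p mode with crtc_htotal = 2200 and
 * crtc_clock = 148500 kHz gives
 *   linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 * i.e. ~14.8us per scanline expressed in 1/8us units. ips_linetime is
 * the same calculation performed against cdclk instead of the pixel clock.
 */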
2086 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[])
2088 struct drm_i915_private *dev_priv = to_i915(dev);
2090 if (IS_GEN9(dev)) {
2091 uint32_t val;
2092 int ret, i;
2093 int level, max_level = ilk_wm_max_level(dev);
2095 /* read the first set of memory latencies[0:3] */
2096 val = 0; /* data0 to be programmed to 0 for first set */
2097 mutex_lock(&dev_priv->rps.hw_lock);
2098 ret = sandybridge_pcode_read(dev_priv,
2099 GEN9_PCODE_READ_MEM_LATENCY,
2100 &val);
2101 mutex_unlock(&dev_priv->rps.hw_lock);
2103 if (ret) {
2104 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2105 return;
2106 }
2108 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2109 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2110 GEN9_MEM_LATENCY_LEVEL_MASK;
2111 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2112 GEN9_MEM_LATENCY_LEVEL_MASK;
2113 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2114 GEN9_MEM_LATENCY_LEVEL_MASK;
2116 /* read the second set of memory latencies[4:7] */
2117 val = 1; /* data0 to be programmed to 1 for second set */
2118 mutex_lock(&dev_priv->rps.hw_lock);
2119 ret = sandybridge_pcode_read(dev_priv,
2120 GEN9_PCODE_READ_MEM_LATENCY,
2121 &val);
2122 mutex_unlock(&dev_priv->rps.hw_lock);
2123 if (ret) {
2124 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2125 return;
2126 }
2128 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2129 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2130 GEN9_MEM_LATENCY_LEVEL_MASK;
2131 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2132 GEN9_MEM_LATENCY_LEVEL_MASK;
2133 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2134 GEN9_MEM_LATENCY_LEVEL_MASK;
2137 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2138 * need to be disabled. We make sure to sanitize the values out
2139 * of the punit to satisfy this requirement.
2141 for (level = 1; level <= max_level; level++) {
2142 if (wm[level] == 0) {
2143 for (i = level + 1; i <= max_level; i++)
2144 wm[i] = 0;
2145 break;
2150 * WaWmMemoryReadLatency:skl
2152 * punit doesn't take into account the read latency so we need
2153 * to add 2us to the various latency levels we retrieve from the
2154 * punit when level 0 response data is 0us.
2155 */
2156 if (wm[0] == 0) {
2157 wm[0] += 2;
2158 for (level = 1; level <= max_level; level++) {
2159 if (wm[level] == 0)
2160 break;
2161 wm[level] += 2;
2162 }
2163 }
2165 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2166 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2168 wm[0] = (sskpd >> 56) & 0xFF;
2169 if (wm[0] == 0)
2170 wm[0] = sskpd & 0xF;
2171 wm[1] = (sskpd >> 4) & 0xFF;
2172 wm[2] = (sskpd >> 12) & 0xFF;
2173 wm[3] = (sskpd >> 20) & 0x1FF;
2174 wm[4] = (sskpd >> 32) & 0x1FF;
2175 } else if (INTEL_INFO(dev)->gen >= 6) {
2176 uint32_t sskpd = I915_READ(MCH_SSKPD);
2178 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2179 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2180 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2181 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2182 } else if (INTEL_INFO(dev)->gen >= 5) {
2183 uint32_t mltr = I915_READ(MLTR_ILK);
2185 /* ILK primary LP0 latency is 700 ns */
2186 wm[0] = 7;
2187 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2188 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
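/*
 * Worked example of the GEN9 mailbox decode above (example value, and
 * assuming the usual 8-bits-per-level packing with level shifts of
 * 8/16/24 behind the GEN9_MEM_LATENCY_LEVEL_*_SHIFT names): a first-set
 * response of val = 0x0c0a0604 unpacks to wm[0..3] = 4, 6, 10, 12 us;
 * the second read fills levels 4-7 the same way.
 */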
2192 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2194 /* ILK sprite LP0 latency is 1300 ns */
2195 if (IS_GEN5(dev))
2196 wm[0] = 13;
2199 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2201 /* ILK cursor LP0 latency is 1300 ns */
2202 if (IS_GEN5(dev))
2203 wm[0] = 13;
2205 /* WaDoubleCursorLP3Latency:ivb */
2206 if (IS_IVYBRIDGE(dev))
2207 wm[3] *= 2;
2210 int ilk_wm_max_level(const struct drm_device *dev)
2212 /* how many WM levels are we expecting */
2213 if (INTEL_INFO(dev)->gen >= 9)
2214 return 7;
2215 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2216 return 4;
2217 else if (INTEL_INFO(dev)->gen >= 6)
2218 return 3;
2219 else
2220 return 2;
2223 static void intel_print_wm_latency(struct drm_device *dev,
2225 const uint16_t wm[8])
2227 int level, max_level = ilk_wm_max_level(dev);
2229 for (level = 0; level <= max_level; level++) {
2230 unsigned int latency = wm[level];
2232 if (latency == 0) {
2233 DRM_ERROR("%s WM%d latency not provided\n",
2234 name, level);
2235 continue;
2236 }
2239 * - latencies are in us on gen9.
2240 * - before then, WM1+ latency values are in 0.5us units
2241 */
2242 if (INTEL_INFO(dev)->gen >= 9)
2243 latency *= 10;
2244 else if (level > 0)
2245 latency *= 5;
2247 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2248 name, level, wm[level],
2249 latency / 10, latency % 10);
2253 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2254 uint16_t wm[5], uint16_t min)
2256 int level, max_level = ilk_wm_max_level(&dev_priv->drm);
2258 if (wm[0] >= min)
2259 return false;
2261 wm[0] = max(wm[0], min);
2262 for (level = 1; level <= max_level; level++)
2263 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2265 return true;
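/*
 * Worked example (example values): with min = 12 (1.2us in 0.1us units),
 * wm[0] is raised to at least 12, while the WM1+ entries, being stored
 * in 0.5us units, are raised to DIV_ROUND_UP(12, 5) = 3, i.e. 1.5us.
 */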
2268 static void snb_wm_latency_quirk(struct drm_device *dev)
2270 struct drm_i915_private *dev_priv = to_i915(dev);
2271 bool changed;
2274 * The BIOS provided WM memory latency values are often
2275 * inadequate for high resolution displays. Adjust them.
2277 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
2278 changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
2279 changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2281 if (!changed)
2282 return;
2284 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2285 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2286 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2287 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2290 static void ilk_setup_wm_latency(struct drm_device *dev)
2292 struct drm_i915_private *dev_priv = to_i915(dev);
2294 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2296 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2297 sizeof(dev_priv->wm.pri_latency));
2298 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2299 sizeof(dev_priv->wm.pri_latency));
2301 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2302 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2304 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2305 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2306 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2308 if (IS_GEN6(dev))
2309 snb_wm_latency_quirk(dev);
2312 static void skl_setup_wm_latency(struct drm_device *dev)
2314 struct drm_i915_private *dev_priv = to_i915(dev);
2316 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2317 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2320 static bool ilk_validate_pipe_wm(struct drm_device *dev,
2321 struct intel_pipe_wm *pipe_wm)
2323 /* LP0 watermark maximums depend on this pipe alone */
2324 const struct intel_wm_config config = {
2325 .num_pipes_active = 1,
2326 .sprites_enabled = pipe_wm->sprites_enabled,
2327 .sprites_scaled = pipe_wm->sprites_scaled,
2329 struct ilk_wm_maximums max;
2331 /* LP0 watermarks always use 1/2 DDB partitioning */
2332 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2334 /* At least LP0 must be valid */
2335 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
2336 DRM_DEBUG_KMS("LP0 watermark invalid\n");
2337 return false;
2338 }
2340 return true;
2343 /* Compute new watermarks for the pipe */
2344 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2346 struct drm_atomic_state *state = cstate->base.state;
2347 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2348 struct intel_pipe_wm *pipe_wm;
2349 struct drm_device *dev = state->dev;
2350 const struct drm_i915_private *dev_priv = to_i915(dev);
2351 struct intel_plane *intel_plane;
2352 struct intel_plane_state *pristate = NULL;
2353 struct intel_plane_state *sprstate = NULL;
2354 struct intel_plane_state *curstate = NULL;
2355 int level, max_level = ilk_wm_max_level(dev), usable_level;
2356 struct ilk_wm_maximums max;
2358 pipe_wm = &cstate->wm.ilk.optimal;
2360 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2361 struct intel_plane_state *ps;
2363 ps = intel_atomic_get_existing_plane_state(state,
2364 &intel_plane->base);
2365 if (!ps)
2366 continue;
2368 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2369 pristate = ps;
2370 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2371 sprstate = ps;
2372 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2373 curstate = ps;
2376 pipe_wm->pipe_enabled = cstate->base.active;
2378 pipe_wm->sprites_enabled = sprstate->base.visible;
2379 pipe_wm->sprites_scaled = sprstate->base.visible &&
2380 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
2381 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
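/*
 * Note: the src rectangle is in 16.16 fixed point while dst is in whole
 * pixels, hence the >> 16 above. E.g. (example values) a 1920-pixel-wide
 * source (0x780 << 16) scanned out at 960 pixels counts as scaled.
 */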
2384 usable_level = max_level;
2386 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2387 if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
2388 usable_level = 1;
2390 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2391 if (pipe_wm->sprites_scaled)
2392 usable_level = 0;
2394 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2395 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
2397 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2398 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2400 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2401 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2403 if (!ilk_validate_pipe_wm(dev, pipe_wm))
2404 return -EINVAL;
2406 ilk_compute_wm_reg_maximums(dev, 1, &max);
2408 for (level = 1; level <= max_level; level++) {
2409 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
2411 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2412 pristate, sprstate, curstate, wm);
2415 * Disable any watermark level that exceeds the
2416 * register maximums since such watermarks are
2417 * always invalid.
2419 if (level > usable_level)
2420 break;
2422 if (ilk_validate_wm_level(level, &max, wm))
2423 pipe_wm->wm[level] = *wm;
2424 else
2425 usable_level = level;
2426 }
2428 return 0;
2432 * Build a set of 'intermediate' watermark values that satisfy both the old
2433 * state and the new state. These can be programmed to the hardware
2434 * immediately.
2436 static int ilk_compute_intermediate_wm(struct drm_device *dev,
2437 struct intel_crtc *intel_crtc,
2438 struct intel_crtc_state *newstate)
2440 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2441 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2442 int level, max_level = ilk_wm_max_level(dev);
2445 * Start with the final, target watermarks, then combine with the
2446 * currently active watermarks to get values that are safe both before
2447 * and after the vblank.
2449 *a = newstate->wm.ilk.optimal;
2450 a->pipe_enabled |= b->pipe_enabled;
2451 a->sprites_enabled |= b->sprites_enabled;
2452 a->sprites_scaled |= b->sprites_scaled;
2454 for (level = 0; level <= max_level; level++) {
2455 struct intel_wm_level *a_wm = &a->wm[level];
2456 const struct intel_wm_level *b_wm = &b->wm[level];
2458 a_wm->enable &= b_wm->enable;
2459 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2460 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2461 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2462 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
2466 * We need to make sure that these merged watermark values are
2467 * actually a valid configuration themselves. If they're not,
2468 * there's no safe way to transition from the old state to
2469 * the new state, so we need to fail the atomic transaction.
2471 if (!ilk_validate_pipe_wm(dev, a))
2475 * If our intermediate WM are identical to the final WM, then we can
2476 * omit the post-vblank programming; only update if it's different.
2478 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
2479 newstate->wm.need_postvbl_update = false;
2485 * Merge the watermarks from all active pipes for a specific level.
2487 static void ilk_merge_wm_level(struct drm_device *dev,
2488 int level,
2489 struct intel_wm_level *ret_wm)
2491 const struct intel_crtc *intel_crtc;
2493 ret_wm->enable = true;
2495 for_each_intel_crtc(dev, intel_crtc) {
2496 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
2497 const struct intel_wm_level *wm = &active->wm[level];
2499 if (!active->pipe_enabled)
2500 continue;
2503 * The watermark values may have been used in the past,
2504 * so we must maintain them in the registers for some
2505 * time even if the level is now disabled.
2507 if (!wm->enable)
2508 ret_wm->enable = false;
2510 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2511 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2512 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2513 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2518 * Merge all low power watermarks for all active pipes.
2520 static void ilk_wm_merge(struct drm_device *dev,
2521 const struct intel_wm_config *config,
2522 const struct ilk_wm_maximums *max,
2523 struct intel_pipe_wm *merged)
2525 struct drm_i915_private *dev_priv = to_i915(dev);
2526 int level, max_level = ilk_wm_max_level(dev);
2527 int last_enabled_level = max_level;
2529 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2530 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2531 config->num_pipes_active > 1)
2532 last_enabled_level = 0;
2534 /* ILK: FBC WM must be disabled always */
2535 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2537 /* merge each WM1+ level */
2538 for (level = 1; level <= max_level; level++) {
2539 struct intel_wm_level *wm = &merged->wm[level];
2541 ilk_merge_wm_level(dev, level, wm);
2543 if (level > last_enabled_level)
2544 wm->enable = false;
2545 else if (!ilk_validate_wm_level(level, max, wm))
2546 /* make sure all following levels get disabled */
2547 last_enabled_level = level - 1;
2550 * The spec says it is preferred to disable
2551 * FBC WMs instead of disabling a WM level.
2553 if (wm->fbc_val > max->fbc) {
2554 if (wm->enable)
2555 merged->fbc_wm_enabled = false;
2556 wm->fbc_val = 0;
2560 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2562 * FIXME this is racy. FBC might get enabled later.
2563 * What we should check here is whether FBC can be
2564 * enabled sometime later.
2566 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
2567 intel_fbc_is_active(dev_priv)) {
2568 for (level = 2; level <= max_level; level++) {
2569 struct intel_wm_level *wm = &merged->wm[level];
2571 wm->enable = false;
2576 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2578 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2579 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
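/*
 * Worked example: on HSW/BDW with 5 levels, if wm[4] is enabled the
 * mapping is LP1->1, LP2->3, LP3->4 (level 2 is skipped); otherwise,
 * and on ILK/SNB/IVB, it is simply LP1->1, LP2->2, LP3->3.
 */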
2582 /* The value we need to program into the WM_LPx latency field */
2583 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2585 struct drm_i915_private *dev_priv = to_i915(dev);
2587 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2588 return 2 * level;
2589 else
2590 return dev_priv->wm.pri_latency[level];
2593 static void ilk_compute_wm_results(struct drm_device *dev,
2594 const struct intel_pipe_wm *merged,
2595 enum intel_ddb_partitioning partitioning,
2596 struct ilk_wm_values *results)
2598 struct intel_crtc *intel_crtc;
2601 results->enable_fbc_wm = merged->fbc_wm_enabled;
2602 results->partitioning = partitioning;
2604 /* LP1+ register values */
2605 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2606 const struct intel_wm_level *r;
2608 level = ilk_wm_lp_to_level(wm_lp, merged);
2610 r = &merged->wm[level];
2613 * Maintain the watermark values even if the level is
2614 * disabled. Doing otherwise could cause underruns.
2616 results->wm_lp[wm_lp - 1] =
2617 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2618 (r->pri_val << WM1_LP_SR_SHIFT) |
2619 r->cur_val;
2621 if (r->enable)
2622 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2624 if (INTEL_INFO(dev)->gen >= 8)
2625 results->wm_lp[wm_lp - 1] |=
2626 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2627 else
2628 results->wm_lp[wm_lp - 1] |=
2629 r->fbc_val << WM1_LP_FBC_SHIFT;
2632 * Always set WM1S_LP_EN when spr_val != 0, even if the
2633 * level is disabled. Doing otherwise could cause underruns.
2635 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2636 WARN_ON(wm_lp != 1);
2637 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2638 } else if (INTEL_INFO(dev)->gen >= 7)
2639 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2642 /* LP0 register values */
2643 for_each_intel_crtc(dev, intel_crtc) {
2644 enum pipe pipe = intel_crtc->pipe;
2645 const struct intel_wm_level *r =
2646 &intel_crtc->wm.active.ilk.wm[0];
2648 if (WARN_ON(!r->enable))
2649 continue;
2651 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
2653 results->wm_pipe[pipe] =
2654 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2655 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2656 r->cur_val;
2660 /* Find the result with the highest level enabled. If both reach the same
2661 * level, prefer the one with enable_fbc_wm set; prefer r1 on a full tie. */
2662 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2663 struct intel_pipe_wm *r1,
2664 struct intel_pipe_wm *r2)
2666 int level, max_level = ilk_wm_max_level(dev);
2667 int level1 = 0, level2 = 0;
2669 for (level = 1; level <= max_level; level++) {
2670 if (r1->wm[level].enable)
2671 level1 = level;
2672 if (r2->wm[level].enable)
2673 level2 = level;
2676 if (level1 == level2) {
2677 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2678 return r2;
2679 else
2680 return r1;
2681 } else if (level1 > level2) {
2682 return r1;
2683 } else {
2684 return r2;
2685 }
2688 /* dirty bits used to track which watermarks need changes */
2689 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2690 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2691 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2692 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2693 #define WM_DIRTY_FBC (1 << 24)
2694 #define WM_DIRTY_DDB (1 << 25)
2696 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2697 const struct ilk_wm_values *old,
2698 const struct ilk_wm_values *new)
2700 unsigned int dirty = 0;
2704 for_each_pipe(dev_priv, pipe) {
2705 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2706 dirty |= WM_DIRTY_LINETIME(pipe);
2707 /* Must disable LP1+ watermarks too */
2708 dirty |= WM_DIRTY_LP_ALL;
2711 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2712 dirty |= WM_DIRTY_PIPE(pipe);
2713 /* Must disable LP1+ watermarks too */
2714 dirty |= WM_DIRTY_LP_ALL;
2718 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2719 dirty |= WM_DIRTY_FBC;
2720 /* Must disable LP1+ watermarks too */
2721 dirty |= WM_DIRTY_LP_ALL;
2724 if (old->partitioning != new->partitioning) {
2725 dirty |= WM_DIRTY_DDB;
2726 /* Must disable LP1+ watermarks too */
2727 dirty |= WM_DIRTY_LP_ALL;
2730 /* LP1+ watermarks already deemed dirty, no need to continue */
2731 if (dirty & WM_DIRTY_LP_ALL)
2732 return dirty;
2734 /* Find the lowest numbered LP1+ watermark in need of an update... */
2735 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2736 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2737 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2738 break;
2741 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2742 for (; wm_lp <= 3; wm_lp++)
2743 dirty |= WM_DIRTY_LP(wm_lp);
2745 return dirty;
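/*
 * Worked example: if only the WM2_LP value changes, the loop above finds
 * wm_lp = 2 and marks WM_DIRTY_LP(2) and WM_DIRTY_LP(3) dirty, since all
 * higher-numbered LP watermarks get disabled and rewritten together;
 * WM_DIRTY_LP(1) stays clean.
 */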
2748 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2751 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2752 bool changed = false;
2754 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2755 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2756 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2757 changed = true;
2759 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2760 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2761 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2762 changed = true;
2764 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2765 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2766 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2767 changed = true;
2771 * Don't touch WM1S_LP_EN here.
2772 * Doing so could cause underruns.
2773 */
2775 return changed;
2779 * The spec says we shouldn't write when we don't need, because every write
2780 * causes WMs to be re-evaluated, expending some power.
2782 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2783 struct ilk_wm_values *results)
2785 struct drm_device *dev = &dev_priv->drm;
2786 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2790 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2791 if (!dirty)
2792 return;
2794 _ilk_disable_lp_wm(dev_priv, dirty);
2796 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2797 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2798 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2799 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2800 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2801 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2803 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2804 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2805 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2806 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2807 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2808 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2810 if (dirty & WM_DIRTY_DDB) {
2811 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2812 val = I915_READ(WM_MISC);
2813 if (results->partitioning == INTEL_DDB_PART_1_2)
2814 val &= ~WM_MISC_DATA_PARTITION_5_6;
2815 else
2816 val |= WM_MISC_DATA_PARTITION_5_6;
2817 I915_WRITE(WM_MISC, val);
2818 } else {
2819 val = I915_READ(DISP_ARB_CTL2);
2820 if (results->partitioning == INTEL_DDB_PART_1_2)
2821 val &= ~DISP_DATA_PARTITION_5_6;
2822 else
2823 val |= DISP_DATA_PARTITION_5_6;
2824 I915_WRITE(DISP_ARB_CTL2, val);
2828 if (dirty & WM_DIRTY_FBC) {
2829 val = I915_READ(DISP_ARB_CTL);
2830 if (results->enable_fbc_wm)
2831 val &= ~DISP_FBC_WM_DIS;
2832 else
2833 val |= DISP_FBC_WM_DIS;
2834 I915_WRITE(DISP_ARB_CTL, val);
2837 if (dirty & WM_DIRTY_LP(1) &&
2838 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2839 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2841 if (INTEL_INFO(dev)->gen >= 7) {
2842 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2843 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2844 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2845 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2848 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2849 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2850 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2851 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2852 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2853 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2855 dev_priv->wm.hw = *results;
2858 bool ilk_disable_lp_wm(struct drm_device *dev)
2860 struct drm_i915_private *dev_priv = to_i915(dev);
2862 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2865 #define SKL_SAGV_BLOCK_TIME 30 /* µs */
2868 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
2869 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2870 * other universal planes are in indices 1..n. Note that this may leave unused
2871 * indices between the top "sprite" plane and the cursor.
2874 skl_wm_plane_id(const struct intel_plane *plane)
2876 switch (plane->base.type) {
2877 case DRM_PLANE_TYPE_PRIMARY:
2878 return 0;
2879 case DRM_PLANE_TYPE_CURSOR:
2880 return PLANE_CURSOR;
2881 case DRM_PLANE_TYPE_OVERLAY:
2882 return plane->plane + 1;
2883 default:
2884 MISSING_CASE(plane->base.type);
2885 return plane->plane;
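/*
 * Worked example of the mapping above: on a pipe with a primary plane,
 * two sprites and a cursor, the slots are primary -> 0, sprite 0 -> 1,
 * sprite 1 -> 2, cursor -> PLANE_CURSOR (I915_MAX_PLANES - 1).
 */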
2890 * FIXME: We still don't have the proper code to detect whether we need to
2891 * apply the WA, so assume we'll always need it in order to avoid underruns.
2893 static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
2895 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2897 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
2898 IS_KABYLAKE(dev_priv))
2899 return true;
2901 return false;
2905 intel_has_sagv(struct drm_i915_private *dev_priv)
2907 if (IS_KABYLAKE(dev_priv))
2908 return true;
2910 if (IS_SKYLAKE(dev_priv) &&
2911 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
2912 return true;
2914 return false;
2918 * SAGV dynamically adjusts the system agent voltage and clock frequencies
2919 * depending on power and performance requirements. The display engine access
2920 * to system memory is blocked during the adjustment time. Because of the
2921 * blocking time, having this enabled can cause full system hangs and/or pipe
2922 * underruns if we don't meet all of the following requirements:
2924 * - <= 1 pipe enabled
2925 * - All planes can enable watermarks for latencies >= SAGV engine block time
2926 * - We're not using an interlaced display configuration
2929 intel_enable_sagv(struct drm_i915_private *dev_priv)
2933 if (!intel_has_sagv(dev_priv))
2934 return 0;
2936 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
2937 return 0;
2939 DRM_DEBUG_KMS("Enabling the SAGV\n");
2940 mutex_lock(&dev_priv->rps.hw_lock);
2942 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2943 GEN9_SAGV_ENABLE);
2945 /* We don't need to wait for the SAGV when enabling */
2946 mutex_unlock(&dev_priv->rps.hw_lock);
2949 * Some skl systems, pre-release machines in particular,
2950 * don't actually have an SAGV.
2952 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
2953 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2954 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
2955 return 0;
2956 } else if (ret < 0) {
2957 DRM_ERROR("Failed to enable the SAGV\n");
2958 return ret;
2959 }
2961 dev_priv->sagv_status = I915_SAGV_ENABLED;
2962 return 0;
2966 intel_disable_sagv(struct drm_i915_private *dev_priv)
2970 if (!intel_has_sagv(dev_priv))
2971 return 0;
2973 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
2974 return 0;
2976 DRM_DEBUG_KMS("Disabling the SAGV\n");
2977 mutex_lock(&dev_priv->rps.hw_lock);
2979 /* bspec says to keep retrying for at least 1 ms */
2980 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2982 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
2984 mutex_unlock(&dev_priv->rps.hw_lock);
2987 * Some skl systems, pre-release machines in particular,
2988 * don't actually have an SAGV.
2990 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
2991 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2992 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
2993 return 0;
2994 } else if (ret < 0) {
2995 DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
2996 return ret;
2997 }
2999 dev_priv->sagv_status = I915_SAGV_DISABLED;
3000 return 0;
3003 bool intel_can_enable_sagv(struct drm_atomic_state *state)
3005 struct drm_device *dev = state->dev;
3006 struct drm_i915_private *dev_priv = to_i915(dev);
3007 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3008 struct intel_crtc *crtc;
3009 struct intel_plane *plane;
3011 int level, id, latency;
3013 if (!intel_has_sagv(dev_priv))
3017 * SKL workaround: bspec recommends we disable the SAGV when we have
3018 * more than one pipe enabled
3020 * If there are no active CRTCs, no additional checks need be performed
3022 if (hweight32(intel_state->active_crtcs) == 0)
3023 return true;
3024 else if (hweight32(intel_state->active_crtcs) > 1)
3025 return false;
3027 /* Since we're now guaranteed to only have one active CRTC... */
3028 pipe = ffs(intel_state->active_crtcs) - 1;
3029 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
3031 if (crtc->base.state->mode.flags & DRM_MODE_FLAG_INTERLACE)
3032 return false;
3034 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3035 id = skl_wm_plane_id(plane);
3037 /* Skip this plane if it's not enabled */
3038 if (intel_state->wm_results.plane[pipe][id][0] == 0)
3039 continue;
3041 /* Find the highest enabled wm level for this plane */
3042 for (level = ilk_wm_max_level(dev);
3043 intel_state->wm_results.plane[pipe][id][level] == 0; --level)
3046 latency = dev_priv->wm.skl_latency[level];
3048 if (skl_needs_memory_bw_wa(intel_state) &&
3049 plane->base.state->fb->modifier[0] ==
3050 I915_FORMAT_MOD_X_TILED)
3051 latency += 15;
3054 * If any of the planes on this pipe don't enable wm levels
3055 * that incur memory latencies higher than 30µs we can't enable
3056 * the SAGV.
3058 if (latency < SKL_SAGV_BLOCK_TIME)
3059 return false;
3060 }
3062 return true;
3066 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3067 const struct intel_crtc_state *cstate,
3068 struct skl_ddb_entry *alloc, /* out */
3069 int *num_active /* out */)
3071 struct drm_atomic_state *state = cstate->base.state;
3072 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3073 struct drm_i915_private *dev_priv = to_i915(dev);
3074 struct drm_crtc *for_crtc = cstate->base.crtc;
3075 unsigned int pipe_size, ddb_size;
3076 int nth_active_pipe;
3077 int pipe = to_intel_crtc(for_crtc)->pipe;
3079 if (WARN_ON(!state) || !cstate->base.active) {
3080 alloc->start = 0;
3081 alloc->end = 0;
3082 *num_active = hweight32(dev_priv->active_crtcs);
3083 return;
3084 }
3086 if (intel_state->active_pipe_changes)
3087 *num_active = hweight32(intel_state->active_crtcs);
3088 else
3089 *num_active = hweight32(dev_priv->active_crtcs);
3091 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3092 WARN_ON(ddb_size == 0);
3094 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3097 * If the state doesn't change the active CRTC's, then there's
3098 * no need to recalculate; the existing pipe allocation limits
3099 * should remain unchanged. Note that we're safe from racing
3100 * commits since any racing commit that changes the active CRTC
3101 * list would need to grab _all_ crtc locks, including the one
3102 * we currently hold.
3104 if (!intel_state->active_pipe_changes) {
3105 *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
3106 return;
3107 }
3109 nth_active_pipe = hweight32(intel_state->active_crtcs &
3110 (drm_crtc_mask(for_crtc) - 1));
3111 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
3112 alloc->start = nth_active_pipe * ddb_size / *num_active;
3113 alloc->end = alloc->start + pipe_size;
3116 static unsigned int skl_cursor_allocation(int num_active)
3118 if (num_active == 1)
3119 return 32;
3121 return 8;
3124 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3126 entry->start = reg & 0x3ff;
3127 entry->end = (reg >> 16) & 0x3ff;
3128 if (entry->end)
3129 entry->end += 1;
3132 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3133 struct skl_ddb_allocation *ddb /* out */)
3139 memset(ddb, 0, sizeof(*ddb));
3141 for_each_pipe(dev_priv, pipe) {
3142 enum intel_display_power_domain power_domain;
3144 power_domain = POWER_DOMAIN_PIPE(pipe);
3145 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
3146 continue;
3148 for_each_plane(dev_priv, pipe, plane) {
3149 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
3150 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
3154 val = I915_READ(CUR_BUF_CFG(pipe));
3155 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
3158 intel_display_power_put(dev_priv, power_domain);
3163 * Determines the downscale amount of a plane for the purposes of watermark calculations.
3164 * The bspec defines downscale amount as:
3167 * Horizontal down scale amount = maximum[1, Horizontal source size /
3168 * Horizontal destination size]
3169 * Vertical down scale amount = maximum[1, Vertical source size /
3170 * Vertical destination size]
3171 * Total down scale amount = Horizontal down scale amount *
3172 * Vertical down scale amount
3175 * Return value is provided in 16.16 fixed point form to retain fractional part.
3176 * Caller should take care of dividing & rounding off the value.
3179 skl_plane_downscale_amount(const struct intel_plane_state *pstate)
3181 uint32_t downscale_h, downscale_w;
3182 uint32_t src_w, src_h, dst_w, dst_h;
3184 if (WARN_ON(!pstate->base.visible))
3185 return DRM_PLANE_HELPER_NO_SCALING;
3187 /* n.b., src is 16.16 fixed point, dst is whole integer */
3188 src_w = drm_rect_width(&pstate->base.src);
3189 src_h = drm_rect_height(&pstate->base.src);
3190 dst_w = drm_rect_width(&pstate->base.dst);
3191 dst_h = drm_rect_height(&pstate->base.dst);
3192 if (intel_rotation_90_or_270(pstate->base.rotation))
3193 swap(src_w, src_h);
3195 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3196 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3198 /* Provide result in 16.16 fixed point */
3199 return (uint64_t)downscale_w * downscale_h >> 16;
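/*
 * Worked 16.16 example (example values): a 3840x2160 source on a
 * 1920x1080 destination gives downscale_w = downscale_h = 2 << 16, so
 * (2 << 16) * (2 << 16) >> 16 returns 4 << 16, i.e. a total downscale
 * amount of 4.0.
 */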
3203 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3204 const struct drm_plane_state *pstate,
3207 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3208 struct drm_framebuffer *fb = pstate->fb;
3209 uint32_t down_scale_amount, data_rate;
3210 uint32_t width = 0, height = 0;
3211 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
3213 if (!intel_pstate->base.visible)
3214 return 0;
3215 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
3216 return 0;
3217 if (y && format != DRM_FORMAT_NV12)
3218 return 0;
3220 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3221 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3223 if (intel_rotation_90_or_270(pstate->rotation))
3224 swap(width, height);
3226 /* for planar format */
3227 if (format == DRM_FORMAT_NV12) {
3228 if (y) /* y-plane data rate */
3229 data_rate = width * height *
3230 drm_format_plane_cpp(format, 0);
3231 else /* uv-plane data rate */
3232 data_rate = (width / 2) * (height / 2) *
3233 drm_format_plane_cpp(format, 1);
3235 /* for packed formats */
3236 data_rate = width * height * drm_format_plane_cpp(format, 0);
3239 down_scale_amount = skl_plane_downscale_amount(intel_pstate);
3241 return (uint64_t)data_rate * down_scale_amount >> 16;
3245 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3246 * an 8192x4096@32bpp framebuffer:
3247 * 3 * 4096 * 8192 * 4 < 2^32
3250 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
3252 struct drm_crtc_state *cstate = &intel_cstate->base;
3253 struct drm_atomic_state *state = cstate->state;
3254 struct drm_crtc *crtc = cstate->crtc;
3255 struct drm_device *dev = crtc->dev;
3256 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3257 const struct drm_plane *plane;
3258 const struct intel_plane *intel_plane;
3259 struct drm_plane_state *pstate;
3260 unsigned int rate, total_data_rate = 0;
3264 if (WARN_ON(!state))
3265 return 0;
3267 /* Calculate and cache data rate for each plane */
3268 for_each_plane_in_state(state, plane, pstate, i) {
3269 id = skl_wm_plane_id(to_intel_plane(plane));
3270 intel_plane = to_intel_plane(plane);
3272 if (intel_plane->pipe != intel_crtc->pipe)
3273 continue;
3276 rate = skl_plane_relative_data_rate(intel_cstate,
3277 pstate, 0);
3278 intel_cstate->wm.skl.plane_data_rate[id] = rate;
3281 rate = skl_plane_relative_data_rate(intel_cstate,
3282 pstate, 1);
3283 intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
3286 /* Calculate CRTC's total data rate from cached values */
3287 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3288 int id = skl_wm_plane_id(intel_plane);
3291 total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
3292 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
3295 return total_data_rate;
3299 skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3302 struct drm_framebuffer *fb = pstate->fb;
3303 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3304 uint32_t src_w, src_h;
3305 uint32_t min_scanlines = 8;
3311 /* For packed formats, no y-plane, return 0 */
3312 if (y && fb->pixel_format != DRM_FORMAT_NV12)
3313 return 0;
3315 /* For non-Y-tiled formats, return 8 blocks */
3316 if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
3317 fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
3318 return 8;
3320 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
3321 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
3323 if (intel_rotation_90_or_270(pstate->rotation))
3324 swap(src_w, src_h);
3326 /* Halve UV plane width and height for NV12 */
3327 if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
3328 src_w /= 2;
3329 src_h /= 2;
3330 }
3332 if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
3333 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
3335 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
3337 if (intel_rotation_90_or_270(pstate->rotation)) {
3338 switch (plane_bpp) {
3339 case 1:
3340 min_scanlines = 32;
3341 break;
3342 case 2:
3343 min_scanlines = 16;
3344 break;
3345 case 4:
3346 min_scanlines = 8;
3347 break;
3348 case 8:
3349 min_scanlines = 4;
3350 break;
3351 default:
3352 WARN(1, "Unsupported pixel depth %u for rotation",
3353 plane_bpp);
3354 min_scanlines = 32;
3355 }
3358 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
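/*
 * Worked example for the formula above (example values): a Y-tiled
 * 1920-wide plane at 4 bytes per pixel with min_scanlines = 8 yields
 * DIV_ROUND_UP(4 * 1920 * 4, 512) * 8 / 4 + 3 = 60 * 2 + 3 = 123 blocks.
 */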
3362 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3363 struct skl_ddb_allocation *ddb /* out */)
3365 struct drm_atomic_state *state = cstate->base.state;
3366 struct drm_crtc *crtc = cstate->base.crtc;
3367 struct drm_device *dev = crtc->dev;
3368 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3369 struct intel_plane *intel_plane;
3370 struct drm_plane *plane;
3371 struct drm_plane_state *pstate;
3372 enum pipe pipe = intel_crtc->pipe;
3373 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3374 uint16_t alloc_size, start, cursor_blocks;
3375 uint16_t *minimum = cstate->wm.skl.minimum_blocks;
3376 uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
3377 unsigned int total_data_rate;
3381 /* Clear the partitioning for disabled planes. */
3382 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3383 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3385 if (WARN_ON(!state))
3386 return 0;
3388 if (!cstate->base.active) {
3389 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3390 return 0;
3391 }
3393 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3394 alloc_size = skl_ddb_entry_size(alloc);
3395 if (alloc_size == 0) {
3396 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3400 cursor_blocks = skl_cursor_allocation(num_active);
3401 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3402 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3404 alloc_size -= cursor_blocks;
3406 /* 1. Allocate the minimum required blocks for each active plane */
3407 for_each_plane_in_state(state, plane, pstate, i) {
3408 intel_plane = to_intel_plane(plane);
3409 id = skl_wm_plane_id(intel_plane);
3411 if (intel_plane->pipe != pipe)
3412 continue;
3414 if (!to_intel_plane_state(pstate)->base.visible) {
3415 minimum[id] = 0;
3416 y_minimum[id] = 0;
3417 continue;
3418 }
3419 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3420 minimum[id] = 0;
3421 y_minimum[id] = 0;
3422 continue;
3423 }
3425 minimum[id] = skl_ddb_min_alloc(pstate, 0);
3426 y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
3429 for (i = 0; i < PLANE_CURSOR; i++) {
3430 alloc_size -= minimum[i];
3431 alloc_size -= y_minimum[i];
3435 * 2. Distribute the remaining space in proportion to the amount of
3436 * data each plane needs to fetch from memory.
3438 * FIXME: we may not allocate every single block here.
3440 total_data_rate = skl_get_total_relative_data_rate(cstate);
3441 if (total_data_rate == 0)
3442 return 0;
3444 start = alloc->start;
3445 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3446 unsigned int data_rate, y_data_rate;
3447 uint16_t plane_blocks, y_plane_blocks = 0;
3448 int id = skl_wm_plane_id(intel_plane);
3450 data_rate = cstate->wm.skl.plane_data_rate[id];
3453 * allocation for (packed formats) or (uv-plane part of planar format):
3454 * promote the expression to 64 bits to avoid overflowing, the
3455 * result is < available as data_rate / total_data_rate < 1
3457 plane_blocks = minimum[id];
3458 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3459 total_data_rate);
3461 /* Leave disabled planes at (0,0) */
3462 if (data_rate) {
3463 ddb->plane[pipe][id].start = start;
3464 ddb->plane[pipe][id].end = start + plane_blocks;
3467 start += plane_blocks;
3470 * allocation for y_plane part of planar format:
3472 y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
3474 y_plane_blocks = y_minimum[id];
3475 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3476 total_data_rate);
3478 if (y_data_rate) {
3479 ddb->y_plane[pipe][id].start = start;
3480 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3483 start += y_plane_blocks;
3484 }
3486 return 0;
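/*
 * Worked example of the proportional split above (example values): with
 * alloc_size = 800 blocks left after the minimums and two planes whose
 * data rates are 300 and 100, the first receives minimum[id] + 800 *
 * 300 / 400 = minimum[id] + 600 blocks and the second minimum[id] + 200.
 */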
3490 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3491 * for the read latency) and cpp should always be <= 8, so that
3492 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3493 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3495 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
3497 uint32_t wm_intermediate_val, ret;
3499 if (latency == 0)
3500 return UINT_MAX;
3502 wm_intermediate_val = latency * pixel_rate * cpp / 512;
3503 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3505 return ret;
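/*
 * Worked example (example values): pixel_rate = 148500 (kHz), cpp = 4
 * and latency = 5us give 5 * 148500 * 4 / 512 = 5800, and
 * DIV_ROUND_UP(5800, 1000) = 6 blocks for method 1.
 */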
3508 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3509 uint32_t latency, uint32_t plane_blocks_per_line)
3511 uint32_t ret;
3512 uint32_t wm_intermediate_val;
3514 if (latency == 0)
3515 return UINT_MAX;
3517 wm_intermediate_val = latency * pixel_rate;
3518 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
3519 plane_blocks_per_line;
3521 return ret;
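/*
 * Worked example (example values): with pixel_rate = 148500 (kHz),
 * pipe_htotal = 2200 and latency = 5us, 742500 / (2200 * 1000) rounds
 * up to 1 line, so method 2 returns 1 * plane_blocks_per_line blocks.
 */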
3524 static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3525 struct intel_plane_state *pstate)
3527 uint64_t adjusted_pixel_rate;
3528 uint64_t downscale_amount;
3529 uint64_t pixel_rate;
3531 /* Shouldn't reach here on disabled planes... */
3532 if (WARN_ON(!pstate->base.visible))
3533 return 0;
3536 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3537 * with additional adjustments for plane-specific scaling.
3539 adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
3540 downscale_amount = skl_plane_downscale_amount(pstate);
3542 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3543 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
3545 return pixel_rate;
3548 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3549 struct intel_crtc_state *cstate,
3550 struct intel_plane_state *intel_pstate,
3551 uint16_t ddb_allocation,
3553 uint16_t *out_blocks, /* out */
3554 uint8_t *out_lines, /* out */
3555 bool *enabled /* out */)
3557 struct drm_plane_state *pstate = &intel_pstate->base;
3558 struct drm_framebuffer *fb = pstate->fb;
3559 uint32_t latency = dev_priv->wm.skl_latency[level];
3560 uint32_t method1, method2;
3561 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3562 uint32_t res_blocks, res_lines;
3563 uint32_t selected_result;
3565 uint32_t width = 0, height = 0;
3566 uint32_t plane_pixel_rate;
3567 uint32_t y_tile_minimum, y_min_scanlines;
3568 struct intel_atomic_state *state =
3569 to_intel_atomic_state(cstate->base.state);
3570 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
3572 if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
3573 *enabled = false;
3574 return 0;
3575 }
3577 if (apply_memory_bw_wa && fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
3578 latency += 15;
3580 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3581 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3583 if (intel_rotation_90_or_270(pstate->rotation))
3584 swap(width, height);
3586 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3587 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3589 if (intel_rotation_90_or_270(pstate->rotation)) {
3590 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3591 drm_format_plane_cpp(fb->pixel_format, 1) :
3592 drm_format_plane_cpp(fb->pixel_format, 0);
3594 switch (cpp) {
3595 case 1:
3596 y_min_scanlines = 16;
3597 break;
3598 case 2:
3599 y_min_scanlines = 8;
3600 break;
3601 default:
3602 WARN(1, "Unsupported pixel depth for rotation");
3603 case 4:
3604 y_min_scanlines = 4;
3605 break;
3606 }
3607 } else {
3608 y_min_scanlines = 4;
3609 }
3611 if (apply_memory_bw_wa)
3612 y_min_scanlines *= 2;
3614 plane_bytes_per_line = width * cpp;
3615 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3616 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3617 plane_blocks_per_line =
3618 DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
3619 plane_blocks_per_line /= y_min_scanlines;
3620 } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
3621 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
3624 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3627 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3628 method2 = skl_wm_method2(plane_pixel_rate,
3629 cstate->base.adjusted_mode.crtc_htotal,
3631 plane_blocks_per_line);
3633 y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
3635 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3636 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3637 selected_result = max(method2, y_tile_minimum);
3639 if ((ddb_allocation / plane_blocks_per_line) >= 1)
3640 selected_result = min(method1, method2);
3642 selected_result = method1;
3645 res_blocks = selected_result + 1;
3646 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
3648 if (level >= 1 && level <= 7) {
3649 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3650 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3651 res_blocks += y_tile_minimum;
3652 res_lines += y_min_scanlines;
3653 } else {
3654 res_blocks++;
3655 }
3658 if (res_blocks >= ddb_allocation || res_lines > 31) {
3659 *enabled = false;
3662 * If there are no valid level 0 watermarks, then we can't
3663 * support this display configuration.
3664 */
3665 if (level) {
3666 return 0;
3667 } else {
3668 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3669 DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
3670 to_intel_crtc(cstate->base.crtc)->pipe,
3671 skl_wm_plane_id(to_intel_plane(pstate->plane)),
3672 res_blocks, ddb_allocation, res_lines);
3674 return -EINVAL;
3675 }
3676 }
3678 *out_blocks = res_blocks;
3679 *out_lines = res_lines;
3680 *enabled = true;
3682 return 0;
3686 skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3687 struct skl_ddb_allocation *ddb,
3688 struct intel_crtc_state *cstate,
3690 struct skl_wm_level *result)
3692 struct drm_atomic_state *state = cstate->base.state;
3693 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3694 struct drm_plane *plane;
3695 struct intel_plane *intel_plane;
3696 struct intel_plane_state *intel_pstate;
3697 uint16_t ddb_blocks;
3698 enum pipe pipe = intel_crtc->pipe;
3702 * We'll only calculate watermarks for planes that are actually
3703 * enabled, so make sure all other planes are set as disabled.
3705 memset(result, 0, sizeof(*result));
3707 for_each_intel_plane_mask(&dev_priv->drm,
3709 cstate->base.plane_mask) {
3710 int i = skl_wm_plane_id(intel_plane);
3712 plane = &intel_plane->base;
3713 intel_pstate = NULL;
3716 intel_atomic_get_existing_plane_state(state,
3720 * Note: If we start supporting multiple pending atomic commits
3721 * against the same planes/CRTC's in the future, plane->state
3722 * will no longer be the correct pre-state to use for the
3723 * calculations here and we'll need to change where we get the
3724 * 'unchanged' plane data from.
3726 * For now this is fine because we only allow one queued commit
3727 * against a CRTC. Even if the plane isn't modified by this
3728 * transaction and we don't have a plane lock, we still have
3729 * the CRTC's lock, so we know that no other transactions are
3730 * racing with us to update it.
3732 if (!intel_pstate)
3733 intel_pstate = to_intel_plane_state(plane->state);
3735 WARN_ON(!intel_pstate->base.fb);
3737 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3739 ret = skl_compute_plane_wm(dev_priv,
3740 cstate,
3741 intel_pstate,
3742 ddb_blocks,
3743 level,
3744 &result->plane_res_b[i],
3745 &result->plane_res_l[i],
3746 &result->plane_en[i]);
3747 if (ret)
3748 return ret;
3749 }
3751 return 0;
3755 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
3757 if (!cstate->base.active)
3758 return 0;
3760 if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
3761 return 0;
3763 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3764 ilk_pipe_pixel_rate(cstate));
3767 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3768 struct skl_wm_level *trans_wm /* out */)
3770 struct drm_crtc *crtc = cstate->base.crtc;
3771 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3772 struct intel_plane *intel_plane;
3774 if (!cstate->base.active)
3775 return;
3777 /* Until we know more, just disable transition WMs */
3778 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3779 int i = skl_wm_plane_id(intel_plane);
3781 trans_wm->plane_en[i] = false;
3785 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3786 struct skl_ddb_allocation *ddb,
3787 struct skl_pipe_wm *pipe_wm)
3789 struct drm_device *dev = cstate->base.crtc->dev;
3790 const struct drm_i915_private *dev_priv = to_i915(dev);
3791 int level, max_level = ilk_wm_max_level(dev);
3794 for (level = 0; level <= max_level; level++) {
3795 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
3796 level, &pipe_wm->wm[level]);
3797 if (ret)
3798 return ret;
3799 }
3800 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3802 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3804 return 0;
3807 static void skl_compute_wm_results(struct drm_device *dev,
3808 struct skl_pipe_wm *p_wm,
3809 struct skl_wm_values *r,
3810 struct intel_crtc *intel_crtc)
3812 int level, max_level = ilk_wm_max_level(dev);
3813 enum pipe pipe = intel_crtc->pipe;
3817 for (level = 0; level <= max_level; level++) {
3818 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3821 temp |= p_wm->wm[level].plane_res_l[i] <<
3822 PLANE_WM_LINES_SHIFT;
3823 temp |= p_wm->wm[level].plane_res_b[i];
3824 if (p_wm->wm[level].plane_en[i])
3825 temp |= PLANE_WM_EN;
3827 r->plane[pipe][i][level] = temp;
3832 temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3833 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
3835 if (p_wm->wm[level].plane_en[PLANE_CURSOR])
3836 temp |= PLANE_WM_EN;
3838 r->plane[pipe][PLANE_CURSOR][level] = temp;
3842 /* transition WMs */
3843 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3845 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3846 temp |= p_wm->trans_wm.plane_res_b[i];
3847 if (p_wm->trans_wm.plane_en[i])
3848 temp |= PLANE_WM_EN;
3850 r->plane_trans[pipe][i] = temp;
3854 temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3855 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3856 if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
3857 temp |= PLANE_WM_EN;
3859 r->plane_trans[pipe][PLANE_CURSOR] = temp;
3861 r->wm_linetime[pipe] = p_wm->linetime;
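/*
 * Illustrative packing (assuming the usual PLANE_WM register layout:
 * result blocks in the low bits, lines at PLANE_WM_LINES_SHIFT and
 * PLANE_WM_EN as the enable bit): an enabled level with plane_res_b = 12
 * and plane_res_l = 2 packs to
 *   PLANE_WM_EN | (2 << PLANE_WM_LINES_SHIFT) | 12.
 */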
3864 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3865 i915_reg_t reg,
3866 const struct skl_ddb_entry *entry)
3868 if (entry->end)
3869 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3870 else
3871 I915_WRITE(reg, 0);
3874 void skl_write_plane_wm(struct intel_crtc *intel_crtc,
3875 const struct skl_wm_values *wm,
3876 int plane)
3878 struct drm_crtc *crtc = &intel_crtc->base;
3879 struct drm_device *dev = crtc->dev;
3880 struct drm_i915_private *dev_priv = to_i915(dev);
3881 int level, max_level = ilk_wm_max_level(dev);
3882 enum pipe pipe = intel_crtc->pipe;
3884 for (level = 0; level <= max_level; level++) {
3885 I915_WRITE(PLANE_WM(pipe, plane, level),
3886 wm->plane[pipe][plane][level]);
3888 I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
3890 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
3891 &wm->ddb.plane[pipe][plane]);
3892 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
3893 &wm->ddb.y_plane[pipe][plane]);
3896 void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
3897 const struct skl_wm_values *wm)
3899 struct drm_crtc *crtc = &intel_crtc->base;
3900 struct drm_device *dev = crtc->dev;
3901 struct drm_i915_private *dev_priv = to_i915(dev);
3902 int level, max_level = ilk_wm_max_level(dev);
3903 enum pipe pipe = intel_crtc->pipe;
3905 for (level = 0; level <= max_level; level++) {
3906 I915_WRITE(CUR_WM(pipe, level),
3907 wm->plane[pipe][PLANE_CURSOR][level]);
3909 I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
3911 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3912 &wm->ddb.plane[pipe][PLANE_CURSOR]);
3915 bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
3916 const struct skl_ddb_allocation *new,
3917 enum pipe pipe)
3919 return new->pipe[pipe].start == old->pipe[pipe].start &&
3920 new->pipe[pipe].end == old->pipe[pipe].end;
3923 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
3924 const struct skl_ddb_entry *b)
3926 return a->start < b->end && b->start < a->end;
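/*
 * The entries behave as half-open [start, end) ranges, so e.g.
 * (example values) {0, 100} and {100, 200} do not overlap, while
 * {0, 100} and {99, 200} do.
 */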
3929 bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
3930 const struct skl_ddb_allocation *old,
3931 const struct skl_ddb_allocation *new,
3932 enum pipe pipe)
3934 struct drm_device *dev = state->dev;
3935 struct intel_crtc *intel_crtc;
3938 for_each_intel_crtc(dev, intel_crtc) {
3939 otherp = intel_crtc->pipe;
3941 if (otherp == pipe)
3942 continue;
3944 if (skl_ddb_entries_overlap(&new->pipe[pipe],
3945 &old->pipe[otherp]))
3946 return true;
3949 return false;
3952 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3953 struct skl_ddb_allocation *ddb, /* out */
3954 struct skl_pipe_wm *pipe_wm, /* out */
3955 bool *changed /* out */)
3957 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
3958 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
3961 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
3962 if (ret)
3963 return ret;
3965 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
3966 *changed = false;
3967 else
3968 *changed = true;
3970 return 0;
3974 pipes_modified(struct drm_atomic_state *state)
3976 struct drm_crtc *crtc;
3977 struct drm_crtc_state *cstate;
3978 uint32_t i, ret = 0;
3980 for_each_crtc_in_state(state, crtc, cstate, i)
3981 ret |= drm_crtc_mask(crtc);
3983 return ret;
3987 skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
3989 struct drm_atomic_state *state = cstate->base.state;
3990 struct drm_device *dev = state->dev;
3991 struct drm_crtc *crtc = cstate->base.crtc;
3992 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3993 struct drm_i915_private *dev_priv = to_i915(dev);
3994 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3995 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
3996 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
3997 struct drm_plane_state *plane_state;
3998 struct drm_plane *plane;
3999 enum pipe pipe = intel_crtc->pipe;
4002 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
4004 drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
4005 id = skl_wm_plane_id(to_intel_plane(plane));
4007 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
4008 &new_ddb->plane[pipe][id]) &&
4009 skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
4010 &new_ddb->y_plane[pipe][id]))
4011 continue;
4013 plane_state = drm_atomic_get_plane_state(state, plane);
4014 if (IS_ERR(plane_state))
4015 return PTR_ERR(plane_state);
4022 skl_compute_ddb(struct drm_atomic_state *state)
4024 struct drm_device *dev = state->dev;
4025 struct drm_i915_private *dev_priv = to_i915(dev);
4026 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4027 struct intel_crtc *intel_crtc;
4028 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
4029 uint32_t realloc_pipes = pipes_modified(state);
4033 * If this is our first atomic update following hardware readout,
4034 * we can't trust the DDB that the BIOS programmed for us. Let's
4035 * pretend that all pipes switched active status so that we'll
4036 * ensure a full DDB recompute.
4038 if (dev_priv->wm.distrust_bios_wm) {
4039 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4040 state->acquire_ctx);
4041 if (ret)
4042 return ret;
4044 intel_state->active_pipe_changes = ~0;
4047 * We usually only initialize intel_state->active_crtcs if we're
4048 * doing a modeset; make sure this field is always
4049 * initialized during the sanitization process that happens
4050 * on the first commit too.
4052 if (!intel_state->modeset)
4053 intel_state->active_crtcs = dev_priv->active_crtcs;
4057 * If the modeset changes which CRTC's are active, we need to
4058 * recompute the DDB allocation for *all* active pipes, even
4059 * those that weren't otherwise being modified in any way by this
4060 * atomic commit. Due to the shrinking of the per-pipe allocations
4061 * when new active CRTC's are added, it's possible for a pipe that
4062 * we were already using and aren't changing at all here to suddenly
4063 * become invalid if its DDB needs exceed its new allocation.
4065 * Note that if we wind up doing a full DDB recompute, we can't let
4066 * any other display updates race with this transaction, so we need
4067 * to grab the lock on *all* CRTC's.
4069 if (intel_state->active_pipe_changes) {
4070 realloc_pipes = ~0;
4071 intel_state->wm_results.dirty_pipes = ~0;
4075 * We're not recomputing for the pipes not included in the commit, so
4076 * make sure we start with the current state.
4078 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
4080 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
4081 struct intel_crtc_state *cstate;
4083 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
4084 if (IS_ERR(cstate))
4085 return PTR_ERR(cstate);
4087 ret = skl_allocate_pipe_ddb(cstate, ddb);
4088 if (ret)
4089 return ret;
4091 ret = skl_ddb_add_affected_planes(cstate);
4092 if (ret)
4093 return ret;
4094 }
4096 return 0;
4100 skl_copy_wm_for_pipe(struct skl_wm_values *dst,
4101 struct skl_wm_values *src,
4104 dst->wm_linetime[pipe] = src->wm_linetime[pipe];
4105 memcpy(dst->plane[pipe], src->plane[pipe],
4106 sizeof(dst->plane[pipe]));
4107 memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
4108 sizeof(dst->plane_trans[pipe]));
4110 dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
4111 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
4112 sizeof(dst->ddb.y_plane[pipe]));
4113 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
4114 sizeof(dst->ddb.plane[pipe]));
4118 skl_compute_wm(struct drm_atomic_state *state)
4120 struct drm_crtc *crtc;
4121 struct drm_crtc_state *cstate;
4122 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4123 struct skl_wm_values *results = &intel_state->wm_results;
4124 struct drm_device *dev = state->dev;
4125 struct skl_pipe_wm *pipe_wm;
4126 bool changed = false;
4130 * When we distrust bios wm we always need to recompute to set the
4131 * expected DDB allocations for each CRTC.
4133 if (to_i915(dev)->wm.distrust_bios_wm)
4134 changed = true;
4137 * If this transaction isn't actually touching any CRTC's, don't
4138 * bother with watermark calculation. Note that if we pass this
4139 * test, we're guaranteed to hold at least one CRTC state mutex,
4140 * which means we can safely use values like dev_priv->active_crtcs
4141 * since any racing commits that want to update them would need to
4142 * hold _all_ CRTC state mutexes.
4144 for_each_crtc_in_state(state, crtc, cstate, i)
4145 changed = true;
4146 if (!changed)
4147 return 0;
4150 /* Clear all dirty flags */
4151 results->dirty_pipes = 0;
4153 ret = skl_compute_ddb(state);
4154 if (ret)
4155 return ret;
4158 * Calculate WM's for all pipes that are part of this transaction.
4159 * Note that the DDB allocation above may have added more CRTC's that
4160 * weren't otherwise being modified (and set bits in dirty_pipes) if
4161 * pipe allocations had to change.
4163 * FIXME: Now that we're doing this in the atomic check phase, we
4164 * should allow skl_update_pipe_wm() to return failure in cases where
4165 * no suitable watermark values can be found.
4167 for_each_crtc_in_state(state, crtc, cstate, i) {
4168 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4169 struct intel_crtc_state *intel_cstate =
4170 to_intel_crtc_state(cstate);
4172 pipe_wm = &intel_cstate->wm.skl.optimal;
4173 ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
4174 &changed);
4175 if (ret)
4176 return ret;
4178 if (changed)
4179 results->dirty_pipes |= drm_crtc_mask(crtc);
4181 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4182 /* This pipe's WM's did not change */
4183 continue;
4185 intel_cstate->update_wm_pre = true;
4186 skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
4192 static void skl_update_wm(struct drm_crtc *crtc)
4194 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4195 struct drm_device *dev = crtc->dev;
4196 struct drm_i915_private *dev_priv = to_i915(dev);
4197 struct skl_wm_values *results = &dev_priv->wm.skl_results;
4198 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
4199 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4200 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
4201 enum pipe pipe = intel_crtc->pipe;
	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
		return;

	intel_crtc->wm.active.skl = *pipe_wm;
4208 mutex_lock(&dev_priv->wm.wm_mutex);
4211 * If this pipe isn't active already, we're going to be enabling it
4212 * very soon. Since it's safe to update a pipe's ddb allocation while
4213 * the pipe's shut off, just do so here. Already active pipes will have
4214 * their watermarks updated once we update their planes.
	if (crtc->state->active_changed) {
		int plane;

		for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
			skl_write_plane_wm(intel_crtc, results, plane);

		skl_write_cursor_wm(intel_crtc, results);
	}
4225 skl_copy_wm_for_pipe(hw_vals, results, pipe);
4227 mutex_unlock(&dev_priv->wm.wm_mutex);
4230 static void ilk_compute_wm_config(struct drm_device *dev,
4231 struct intel_wm_config *config)
4233 struct intel_crtc *crtc;
4235 /* Compute the currently _active_ config */
4236 for_each_intel_crtc(dev, crtc) {
4237 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
4239 if (!wm->pipe_enabled)
4242 config->sprites_enabled |= wm->sprites_enabled;
4243 config->sprites_scaled |= wm->sprites_scaled;
4244 config->num_pipes_active++;
4248 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
4250 struct drm_device *dev = &dev_priv->drm;
4251 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
4252 struct ilk_wm_maximums max;
4253 struct intel_wm_config config = {};
4254 struct ilk_wm_values results = {};
4255 enum intel_ddb_partitioning partitioning;
4257 ilk_compute_wm_config(dev, &config);
4259 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
4260 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
4262 /* 5/6 split only in single pipe config on IVB+ */
4263 if (INTEL_INFO(dev)->gen >= 7 &&
4264 config.num_pipes_active == 1 && config.sprites_enabled) {
4265 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}
4273 partitioning = (best_lp_wm == &lp_wm_1_2) ?
4274 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
4276 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
4278 ilk_write_wm_values(dev_priv, &results);
4281 static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
4283 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4284 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4286 mutex_lock(&dev_priv->wm.wm_mutex);
4287 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
4288 ilk_program_watermarks(dev_priv);
4289 mutex_unlock(&dev_priv->wm.wm_mutex);
4292 static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
4294 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4295 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4297 mutex_lock(&dev_priv->wm.wm_mutex);
4298 if (cstate->wm.need_postvbl_update) {
4299 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
4300 ilk_program_watermarks(dev_priv);
4302 mutex_unlock(&dev_priv->wm.wm_mutex);
static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
			active->wm[level].plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
			active->wm[level].plane_res_b[PLANE_CURSOR] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[PLANE_CURSOR] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
			active->trans_wm.plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
			active->trans_wm.plane_res_b[PLANE_CURSOR] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[PLANE_CURSOR] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	}
}
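/*
 * Illustrative sketch (not driver code): the PLANE_WM layout decoded
 * above packs an enable bit, a block count and a line count into one
 * register value. A standalone decode of the same fields, using the
 * same masks and shifts, would look like this:
 */
static inline void example_decode_plane_wm(uint32_t val, bool *en,
					   uint32_t *blocks, uint32_t *lines)
{
	*en = (val & PLANE_WM_EN) != 0;
	*blocks = val & PLANE_WM_BLOCKS_MASK;
	*lines = (val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK;
}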
4349 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4351 struct drm_device *dev = crtc->dev;
4352 struct drm_i915_private *dev_priv = to_i915(dev);
4353 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
4354 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4355 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4356 struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
4357 enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;
4361 max_level = ilk_wm_max_level(dev);
4363 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4365 for (level = 0; level <= max_level; level++) {
4366 for (i = 0; i < intel_num_planes(intel_crtc); i++)
4367 hw->plane[pipe][i][level] =
4368 I915_READ(PLANE_WM(pipe, i, level));
4369 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
4372 for (i = 0; i < intel_num_planes(intel_crtc); i++)
4373 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
4374 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
	if (!intel_crtc->active)
		return;
4379 hw->dirty_pipes |= drm_crtc_mask(crtc);
4381 active->linetime = hw->wm_linetime[pipe];
4383 for (level = 0; level <= max_level; level++) {
4384 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
4385 temp = hw->plane[pipe][i][level];
4386 skl_pipe_wm_active_state(temp, active, false,
4389 temp = hw->plane[pipe][PLANE_CURSOR][level];
4390 skl_pipe_wm_active_state(temp, active, false, true, i, level);
4393 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
4394 temp = hw->plane_trans[pipe][i];
4395 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
4398 temp = hw->plane_trans[pipe][PLANE_CURSOR];
4399 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
4401 intel_crtc->wm.active.skl = *active;
4404 void skl_wm_get_hw_state(struct drm_device *dev)
4406 struct drm_i915_private *dev_priv = to_i915(dev);
4407 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
4408 struct drm_crtc *crtc;
4410 skl_ddb_get_hw_state(dev_priv, ddb);
4411 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
4412 skl_pipe_wm_get_hw_state(crtc);
	if (dev_priv->active_crtcs) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	} else {
		/* Easy/common case; just sanitize DDB now if everything off */
		memset(ddb, 0, sizeof(*ddb));
	}
4423 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4425 struct drm_device *dev = crtc->dev;
4426 struct drm_i915_private *dev_priv = to_i915(dev);
4427 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4428 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4429 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4430 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
4431 enum pipe pipe = intel_crtc->pipe;
4432 static const i915_reg_t wm0_pipe_reg[] = {
4433 [PIPE_A] = WM0_PIPEA_ILK,
4434 [PIPE_B] = WM0_PIPEB_ILK,
4435 [PIPE_C] = WM0_PIPEC_IVB,
4438 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
4439 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4440 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4442 memset(active, 0, sizeof(*active));
4444 active->pipe_enabled = intel_crtc->active;
4446 if (active->pipe_enabled) {
4447 u32 tmp = hw->wm_pipe[pipe];
4450 * For active pipes LP0 watermark is marked as
* enabled, and LP1+ watermarks as disabled since
4452 * we can't really reverse compute them in case
4453 * multiple pipes are active.
4455 active->wm[0].enable = true;
4456 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
4457 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
4458 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
4459 active->linetime = hw->wm_linetime[pipe];
4461 int level, max_level = ilk_wm_max_level(dev);
4464 * For inactive pipes, all watermark levels
4465 * should be marked as enabled but zeroed,
4466 * which is what we'd compute them to.
4468 for (level = 0; level <= max_level; level++)
4469 active->wm[level].enable = true;
4472 intel_crtc->wm.active.ilk = *active;
4475 #define _FW_WM(value, plane) \
4476 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4477 #define _FW_WM_VLV(value, plane) \
4478 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
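/*
 * Illustrative expansion (not driver code): _FW_WM(tmp, PLANEA) is just
 * ((tmp & DSPFW_PLANEA_MASK) >> DSPFW_PLANEA_SHIFT), i.e. a plain
 * mask-and-shift field extraction; the _VLV variant only swaps in the
 * wider VLV mask. The equivalent written out as a function:
 */
static inline u32 example_fw_wm_field(u32 value, u32 mask, int shift)
{
	return (value & mask) >> shift;
}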
4480 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
4481 struct vlv_wm_values *wm)
4486 for_each_pipe(dev_priv, pipe) {
4487 tmp = I915_READ(VLV_DDL(pipe));
4489 wm->ddl[pipe].primary =
4490 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4491 wm->ddl[pipe].cursor =
4492 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4493 wm->ddl[pipe].sprite[0] =
4494 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4495 wm->ddl[pipe].sprite[1] =
4496 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4499 tmp = I915_READ(DSPFW1);
4500 wm->sr.plane = _FW_WM(tmp, SR);
4501 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
4502 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
4503 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
4505 tmp = I915_READ(DSPFW2);
4506 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
4507 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
4508 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
4510 tmp = I915_READ(DSPFW3);
4511 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4513 if (IS_CHERRYVIEW(dev_priv)) {
4514 tmp = I915_READ(DSPFW7_CHV);
4515 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4516 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4518 tmp = I915_READ(DSPFW8_CHV);
4519 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
4520 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
4522 tmp = I915_READ(DSPFW9_CHV);
4523 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
4524 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
4526 tmp = I915_READ(DSPHOWM);
4527 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4528 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4529 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4530 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
4531 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4532 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4533 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4534 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4535 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4536 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4538 tmp = I915_READ(DSPFW7);
4539 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4540 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4542 tmp = I915_READ(DSPHOWM);
4543 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4544 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4545 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4546 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4547 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4548 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4549 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
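/*
 * Illustrative sketch (not driver code): the read-back above splits
 * each watermark across registers - the low bits in DSPFW1-9 plus one
 * extra high bit per field in DSPHOWM - so the full value is stitched
 * together as low | (hi << 8) (or << 9 for the SR plane). In isolation:
 */
static inline u32 example_merge_wm_bits(u32 low_bits, u32 hi_bit, int shift)
{
	return low_bits | (hi_bit << shift);
}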
4556 void vlv_wm_get_hw_state(struct drm_device *dev)
4558 struct drm_i915_private *dev_priv = to_i915(dev);
4559 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
4560 struct intel_plane *plane;
4564 vlv_read_wm_values(dev_priv, wm);
4566 for_each_intel_plane(dev, plane) {
		switch (plane->base.type) {
			int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			plane->wm.fifo_size = 63;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
			break;
		}
4582 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4583 wm->level = VLV_WM_LEVEL_PM2;
4585 if (IS_CHERRYVIEW(dev_priv)) {
4586 mutex_lock(&dev_priv->rps.hw_lock);
4588 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4589 if (val & DSP_MAXFIFO_PM5_ENABLE)
4590 wm->level = VLV_WM_LEVEL_PM5;
4593 * If DDR DVFS is disabled in the BIOS, Punit
4594 * will never ack the request. So if that happens
4595 * assume we don't have to enable/disable DDR DVFS
4596 * dynamically. To test that just set the REQ_ACK
4597 * bit to poke the Punit, but don't change the
4598 * HIGH/LOW bits so that we don't actually change
4599 * the current state.
4601 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4602 val |= FORCE_DDR_FREQ_REQ_ACK;
4603 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4605 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4606 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4607 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4608 "assuming DDR DVFS is disabled\n");
4609 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4611 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4612 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4613 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4616 mutex_unlock(&dev_priv->rps.hw_lock);
4619 for_each_pipe(dev_priv, pipe)
4620 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4621 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4622 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4624 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4625 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4628 void ilk_wm_get_hw_state(struct drm_device *dev)
4630 struct drm_i915_private *dev_priv = to_i915(dev);
4631 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4632 struct drm_crtc *crtc;
4634 for_each_crtc(dev, crtc)
4635 ilk_pipe_wm_get_hw_state(crtc);
4637 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4638 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4639 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4641 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4642 if (INTEL_INFO(dev)->gen >= 7) {
4643 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4644 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4647 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4648 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4649 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4650 else if (IS_IVYBRIDGE(dev))
4651 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4652 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4655 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4659 * intel_update_watermarks - update FIFO watermark values based on current modes
4661 * Calculate watermark values for the various WM regs based on current mode
4662 * and plane configuration.
4664 * There are several cases to deal with here:
4665 * - normal (i.e. non-self-refresh)
4666 * - self-refresh (SR) mode
4667 * - lines are large relative to FIFO size (buffer can hold up to 2)
4668 * - lines are small relative to FIFO size (buffer can hold more than 2
4669 * lines), so need to account for TLB latency
4671 * The normal calculation is:
4672 * watermark = dotclock * bytes per pixel * latency
4673 * where latency is platform & configuration dependent (we assume pessimal
4676 * The SR calculation is:
4677 * watermark = (trunc(latency/line time)+1) * surface width *
4680 * line time = htotal / dotclock
4681 * surface width = hdisplay for normal plane and 64 for cursor
4682 * and latency is assumed to be high, as above.
4684 * The final value programmed to the register should always be rounded up,
4685 * and include an extra 2 entries to account for clock crossings.
4687 * We don't use the sprite, so we can ignore that. And on Crestline we have
4688 * to set the non-SR watermarks to 8.
4690 void intel_update_watermarks(struct drm_crtc *crtc)
4692 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4694 if (dev_priv->display.update_wm)
4695 dev_priv->display.update_wm(crtc);
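/*
 * Minimal sketch of the self-refresh formula documented above
 * (illustrative only, not driver code): latency and line_time are in
 * the same time unit, width is the surface width in pixels and cpp is
 * bytes per pixel, per the doc comment's assumptions.
 */
static inline unsigned int example_sr_watermark(unsigned int latency,
						unsigned int line_time,
						unsigned int width,
						unsigned int cpp)
{
	/* watermark = (trunc(latency / line time) + 1) * width * cpp */
	return (latency / line_time + 1) * width * cpp;
}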
4699 * Lock protecting IPS related data structures
4701 DEFINE_SPINLOCK(mchdev_lock);
4703 /* Global for IPS driver to get at the current i915 device. Protected by
4705 static struct drm_i915_private *i915_mch_dev;
4707 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4711 assert_spin_locked(&mchdev_lock);
4713 rgvswctl = I915_READ16(MEMSWCTL);
4714 if (rgvswctl & MEMCTL_CMD_STS) {
4715 DRM_DEBUG("gpu busy, RCS change rejected\n");
4716 return false; /* still busy with another command */
4719 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4720 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4721 I915_WRITE16(MEMSWCTL, rgvswctl);
4722 POSTING_READ16(MEMSWCTL);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
4730 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
4733 u8 fmax, fmin, fstart, vstart;
4735 spin_lock_irq(&mchdev_lock);
4737 rgvmodectl = I915_READ(MEMMODECTL);
4739 /* Enable temp reporting */
4740 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4741 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4743 /* 100ms RC evaluation intervals */
4744 I915_WRITE(RCUPEI, 100000);
4745 I915_WRITE(RCDNEI, 100000);
4747 /* Set max/min thresholds to 90ms and 80ms respectively */
4748 I915_WRITE(RCBMAXAVG, 90000);
4749 I915_WRITE(RCBMINAVG, 80000);
4751 I915_WRITE(MEMIHYST, 1);
4753 /* Set up min, max, and cur for interrupt handling */
4754 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4755 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4756 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4757 MEMMODE_FSTART_SHIFT;
4759 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
4762 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4763 dev_priv->ips.fstart = fstart;
4765 dev_priv->ips.max_delay = fstart;
4766 dev_priv->ips.min_delay = fmin;
4767 dev_priv->ips.cur_delay = fstart;
4769 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4770 fmax, fmin, fstart);
4772 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4775 * Interrupts will be enabled in ironlake_irq_postinstall
4778 I915_WRITE(VIDSTART, vstart);
4779 POSTING_READ(VIDSTART);
4781 rgvmodectl |= MEMMODE_SWMODE_EN;
4782 I915_WRITE(MEMMODECTL, rgvmodectl);
4784 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
4785 DRM_ERROR("stuck trying to change perf mode\n");
4788 ironlake_set_drps(dev_priv, fstart);
4790 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4791 I915_READ(DDREC) + I915_READ(CSIEC);
4792 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4793 dev_priv->ips.last_count2 = I915_READ(GFXEC);
4794 dev_priv->ips.last_time2 = ktime_get_raw_ns();
4796 spin_unlock_irq(&mchdev_lock);
4799 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
4803 spin_lock_irq(&mchdev_lock);
4805 rgvswctl = I915_READ16(MEMSWCTL);
4807 /* Ack interrupts, disable EFC interrupt */
4808 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4809 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4810 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4811 I915_WRITE(DEIIR, DE_PCU_EVENT);
4812 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4814 /* Go back to the starting frequency */
4815 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
4817 rgvswctl |= MEMCTL_CMD_STS;
4818 I915_WRITE(MEMSWCTL, rgvswctl);
4821 spin_unlock_irq(&mchdev_lock);
4824 /* There's a funny hw issue where the hw returns all 0 when reading from
4825 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4826 * ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu getting stuck at whatever frequency it is at the moment).
4829 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
4833 /* Only set the down limit when we've reached the lowest level to avoid
4834 * getting more interrupts, otherwise leave this clear. This prevents a
4835 * race in the hw when coming out of rc6: There's a tiny window where
4836 * the hw runs at the minimal clock before selecting the desired
4837 * frequency, if the down threshold expires in that window we will not
4838 * receive a down interrupt. */
	if (IS_GEN9(dev_priv)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
4852 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4855 u32 threshold_up = 0, threshold_down = 0; /* in % */
4856 u32 ei_up = 0, ei_down = 0;
4858 new_power = dev_priv->rps.power;
4859 switch (dev_priv->rps.power) {
4861 if (val > dev_priv->rps.efficient_freq + 1 &&
4862 val > dev_priv->rps.cur_freq)
4863 new_power = BETWEEN;
4867 if (val <= dev_priv->rps.efficient_freq &&
4868 val < dev_priv->rps.cur_freq)
4869 new_power = LOW_POWER;
4870 else if (val >= dev_priv->rps.rp0_freq &&
4871 val > dev_priv->rps.cur_freq)
4872 new_power = HIGH_POWER;
4876 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
4877 val < dev_priv->rps.cur_freq)
4878 new_power = BETWEEN;
4881 /* Max/min bins are special */
4882 if (val <= dev_priv->rps.min_freq_softlimit)
4883 new_power = LOW_POWER;
4884 if (val >= dev_priv->rps.max_freq_softlimit)
4885 new_power = HIGH_POWER;
4886 if (new_power == dev_priv->rps.power)
4889 /* Note the units here are not exactly 1us, but 1280ns. */
4890 switch (new_power) {
4892 /* Upclock if more than 95% busy over 16ms */
4896 /* Downclock if less than 85% busy over 32ms */
4898 threshold_down = 85;
4902 /* Upclock if more than 90% busy over 13ms */
4906 /* Downclock if less than 75% busy over 32ms */
4908 threshold_down = 75;
4912 /* Upclock if more than 85% busy over 10ms */
4916 /* Downclock if less than 60% busy over 32ms */
4918 threshold_down = 60;
	/* Once BYT can survive dynamic sw freq adjustments without
	 * system hangs, this restriction can be lifted.
	 */
4925 if (IS_VALLEYVIEW(dev_priv))
4928 I915_WRITE(GEN6_RP_UP_EI,
4929 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4930 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4931 GT_INTERVAL_FROM_US(dev_priv,
4932 ei_up * threshold_up / 100));
4934 I915_WRITE(GEN6_RP_DOWN_EI,
4935 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4936 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4937 GT_INTERVAL_FROM_US(dev_priv,
4938 ei_down * threshold_down / 100));
4940 I915_WRITE(GEN6_RP_CONTROL,
4941 GEN6_RP_MEDIA_TURBO |
4942 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4943 GEN6_RP_MEDIA_IS_GFX |
4945 GEN6_RP_UP_BUSY_AVG |
4946 GEN6_RP_DOWN_IDLE_AVG);
4949 dev_priv->rps.power = new_power;
4950 dev_priv->rps.up_threshold = threshold_up;
4951 dev_priv->rps.down_threshold = threshold_down;
4952 dev_priv->rps.last_adj = 0;
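/*
 * Illustrative unit conversion (not driver code): as noted above, the
 * RP evaluation intervals are programmed in ~1280ns units rather than
 * microseconds, so a microsecond value scales by 1000/1280 = 25/32.
 * GT_INTERVAL_FROM_US() also handles per-platform differences (gen9
 * uses different base units); this shows only the gen6-style arithmetic.
 */
static inline u32 example_us_to_rp_interval(u32 us)
{
	return us * 25 / 32; /* 1000ns / 1280ns = 25/32 */
}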
4955 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
4960 if (val > dev_priv->rps.min_freq_softlimit)
4961 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4962 if (val < dev_priv->rps.max_freq_softlimit)
4963 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4965 mask &= dev_priv->pm_rps_events;
4967 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
4970 /* gen6_set_rps is called to update the frequency request, but should also be
4971 * called when the range (min_delay and max_delay) is modified so that we can
4972 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4973 static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
4975 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		return;
4979 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4980 WARN_ON(val > dev_priv->rps.max_freq);
4981 WARN_ON(val < dev_priv->rps.min_freq);
4983 /* min/max delay may still have been modified so be sure to
4984 * write the limits value.
4986 if (val != dev_priv->rps.cur_freq) {
4987 gen6_set_rps_thresholds(dev_priv, val);
4989 if (IS_GEN9(dev_priv))
4990 I915_WRITE(GEN6_RPNSWREQ,
4991 GEN9_FREQUENCY(val));
4992 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4993 I915_WRITE(GEN6_RPNSWREQ,
4994 HSW_FREQUENCY(val));
4996 I915_WRITE(GEN6_RPNSWREQ,
4997 GEN6_FREQUENCY(val) |
4999 GEN6_AGGRESSIVE_TURBO);
5002 /* Make sure we continue to get interrupts
5003 * until we hit the minimum or maximum frequencies.
5005 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
5006 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
5008 POSTING_READ(GEN6_RPNSWREQ);
5010 dev_priv->rps.cur_freq = val;
5011 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
5014 static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
5016 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5017 WARN_ON(val > dev_priv->rps.max_freq);
5018 WARN_ON(val < dev_priv->rps.min_freq);
	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;
5024 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
5026 if (val != dev_priv->rps.cur_freq) {
5027 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
5028 if (!IS_CHERRYVIEW(dev_priv))
5029 gen6_set_rps_thresholds(dev_priv, val);
5032 dev_priv->rps.cur_freq = val;
5033 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
5036 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
5038 * * If Gfx is Idle, then
5039 * 1. Forcewake Media well.
5040 * 2. Request idle freq.
5041 * 3. Release Forcewake of Media well.
5043 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
5045 u32 val = dev_priv->rps.idle_freq;
	if (dev_priv->rps.cur_freq <= val)
		return;
5050 /* Wake up the media well, as that takes a lot less
5051 * power than the Render well. */
5052 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
5053 valleyview_set_rps(dev_priv, val);
5054 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
5057 void gen6_rps_busy(struct drm_i915_private *dev_priv)
5059 mutex_lock(&dev_priv->rps.hw_lock);
5060 if (dev_priv->rps.enabled) {
5061 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
5062 gen6_rps_reset_ei(dev_priv);
5063 I915_WRITE(GEN6_PMINTRMSK,
5064 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
5066 gen6_enable_rps_interrupts(dev_priv);
5068 /* Ensure we start at the user's desired frequency */
5069 intel_set_rps(dev_priv,
5070 clamp(dev_priv->rps.cur_freq,
5071 dev_priv->rps.min_freq_softlimit,
5072 dev_priv->rps.max_freq_softlimit));
5074 mutex_unlock(&dev_priv->rps.hw_lock);
5077 void gen6_rps_idle(struct drm_i915_private *dev_priv)
5079 /* Flush our bottom-half so that it does not race with us
5080 * setting the idle frequency and so that it is bounded by
5081 * our rpm wakeref. And then disable the interrupts to stop any
* further RPS reclocking whilst we are asleep.
5084 gen6_disable_rps_interrupts(dev_priv);
5086 mutex_lock(&dev_priv->rps.hw_lock);
5087 if (dev_priv->rps.enabled) {
5088 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5089 vlv_set_rps_idle(dev_priv);
5091 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5092 dev_priv->rps.last_adj = 0;
5093 I915_WRITE(GEN6_PMINTRMSK,
5094 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
5096 mutex_unlock(&dev_priv->rps.hw_lock);
5098 spin_lock(&dev_priv->rps.client_lock);
5099 while (!list_empty(&dev_priv->rps.clients))
5100 list_del_init(dev_priv->rps.clients.next);
5101 spin_unlock(&dev_priv->rps.client_lock);
5104 void gen6_rps_boost(struct drm_i915_private *dev_priv,
5105 struct intel_rps_client *rps,
5106 unsigned long submitted)
5108 /* This is intentionally racy! We peek at the state here, then
5109 * validate inside the RPS worker.
5111 if (!(dev_priv->gt.awake &&
5112 dev_priv->rps.enabled &&
5113 dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
5116 /* Force a RPS boost (and don't count it against the client) if
5117 * the GPU is severely congested.
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
5140 void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
5142 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5143 valleyview_set_rps(dev_priv, val);
5145 gen6_set_rps(dev_priv, val);
5148 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
5150 I915_WRITE(GEN6_RC_CONTROL, 0);
5151 I915_WRITE(GEN9_PG_ENABLE, 0);
5154 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
5156 I915_WRITE(GEN6_RP_CONTROL, 0);
5159 static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
5161 I915_WRITE(GEN6_RC_CONTROL, 0);
5164 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
5166 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
5167 I915_WRITE(GEN6_RP_CONTROL, 0);
5170 static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
5172 I915_WRITE(GEN6_RC_CONTROL, 0);
5175 static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
	/* We do forcewake before disabling RC6; this is what the
	 * BIOS expects when going into suspend. */
5179 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5181 I915_WRITE(GEN6_RC_CONTROL, 0);
5183 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5186 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
5188 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5189 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
5190 mode = GEN6_RC_CTL_RC6_ENABLE;
5194 if (HAS_RC6p(dev_priv))
5195 DRM_DEBUG_DRIVER("Enabling RC6 states: "
5196 "RC6 %s RC6p %s RC6pp %s\n",
5197 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
5198 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
5199 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
5202 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
5203 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
5206 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
5208 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5209 bool enable_rc6 = true;
5210 unsigned long rc6_ctx_base;
5214 rc_ctl = I915_READ(GEN6_RC_CONTROL);
5215 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
5216 RC_SW_TARGET_STATE_SHIFT;
5217 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
5218 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
5219 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
5220 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
5223 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
5224 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
* The exact context size is not known for BXT, so assume a page size
* for this check.
5232 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
5233 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
5234 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
5235 ggtt->stolen_reserved_size))) {
5236 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
5240 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
5241 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
5242 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
5243 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
5244 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
5248 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
5249 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
5250 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
5251 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
5255 if (!I915_READ(GEN6_GFXPAUSE)) {
5256 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
5260 if (!I915_READ(GEN8_MISC_CTRL0)) {
5261 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
5268 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
5270 /* No RC6 before Ironlake and code is gone for ilk. */
5271 if (INTEL_INFO(dev_priv)->gen < 6)
5277 if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
5278 DRM_INFO("RC6 disabled by BIOS\n");
5282 /* Respect the kernel parameter if it is set */
5283 if (enable_rc6 >= 0) {
5286 if (HAS_RC6p(dev_priv))
5287 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
5290 mask = INTEL_RC6_ENABLE;
5292 if ((enable_rc6 & mask) != enable_rc6)
5293 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
5294 "(requested %d, valid %d)\n",
5295 enable_rc6 & mask, enable_rc6, mask);
5297 return enable_rc6 & mask;
5300 if (IS_IVYBRIDGE(dev_priv))
5301 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
5303 return INTEL_RC6_ENABLE;
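/*
 * Worked example for the sanitizing above (illustrative): with RC6p
 * hardware the valid mask is RC6|RC6p|RC6pp = 0x7, so a kernel
 * parameter of 5 (RC6 + RC6pp) passes through unchanged; on hardware
 * without RC6p the mask is just RC6 = 0x1 and the same request is
 * trimmed to 1, with the adjustment logged via DRM_DEBUG_DRIVER.
 */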
5306 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
5308 /* All of these values are in units of 50MHz */
5310 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
5311 if (IS_BROXTON(dev_priv)) {
5312 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
5313 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
5314 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5315 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
5317 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
5318 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
5319 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5320 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
5322 /* hw_max = RP0 until we check for overclocking */
5323 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
5325 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
5326 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
5327 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5328 u32 ddcc_status = 0;
5330 if (sandybridge_pcode_read(dev_priv,
5331 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
5333 dev_priv->rps.efficient_freq =
5335 ((ddcc_status >> 8) & 0xff),
5336 dev_priv->rps.min_freq,
5337 dev_priv->rps.max_freq);
5340 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5341 /* Store the frequency values in 16.66 MHZ units, which is
5342 * the natural hardware unit for SKL
5344 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
5345 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
5346 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
5347 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
5348 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
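/*
 * Illustrative unit note (not driver code): assuming GEN9_FREQ_SCALER
 * is 3, the SKL/KBL values above end up in 50/3 = 16.66MHz units, so
 * converting a stored value back to kHz is just:
 */
static inline unsigned int example_gen9_units_to_khz(unsigned int val)
{
	return val * 50000 / 3; /* one unit = 16666.6 kHz */
}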
5352 static void reset_rps(struct drm_i915_private *dev_priv,
5353 void (*set)(struct drm_i915_private *, u8))
5355 u8 freq = dev_priv->rps.cur_freq;
5358 dev_priv->rps.power = -1;
5359 dev_priv->rps.cur_freq = -1;
5361 set(dev_priv, freq);
5364 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
5365 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
5367 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5369 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
5370 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
* The BIOS could have left HW turbo enabled, so explicitly clear out
* the control register to avoid an inconsistency with the debugfs
* interface, which would otherwise report turbo as enabled. That is
* not what the user expects once WaGsvDisableTurbo is applied. Beyond
* that there is no problem even if turbo is left enabled in the
* control register, as the Up/Down interrupts would remain masked.
5380 gen9_disable_rps(dev_priv);
5381 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
5386 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5387 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
/* 1 second timeout */
5390 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
5391 GT_INTERVAL_FROM_US(dev_priv, 1000000));
5393 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
5395 /* Leaning on the below call to gen6_set_rps to program/setup the
5396 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
5397 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
5398 reset_rps(dev_priv, gen6_set_rps);
5400 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5403 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
5405 struct intel_engine_cs *engine;
5406 uint32_t rc6_mask = 0;
5408 /* 1a: Software RC state - RC0 */
5409 I915_WRITE(GEN6_RC_STATE, 0);
5411 /* 1b: Get forcewake during program sequence. Although the driver
5412 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5413 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5415 /* 2a: Disable RC states. */
5416 I915_WRITE(GEN6_RC_CONTROL, 0);
5418 /* 2b: Program RC6 thresholds.*/
5420 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
5421 if (IS_SKYLAKE(dev_priv))
5422 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
5424 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
5425 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5426 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5427 for_each_engine(engine, dev_priv)
5428 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5430 if (HAS_GUC(dev_priv))
5431 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
5433 I915_WRITE(GEN6_RC_SLEEP, 0);
5435 /* 2c: Program Coarse Power Gating Policies. */
5436 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
5437 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
5439 /* 3a: Enable RC6 */
5440 if (!dev_priv->rps.ctx_corrupted &&
5441 intel_enable_rc6() & INTEL_RC6_ENABLE)
5442 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5443 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
5444 /* WaRsUseTimeoutMode */
5445 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
5446 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
5447 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
5448 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5449 GEN7_RC_CTL_TO_MODE |
5452 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
5453 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5454 GEN6_RC_CTL_EI_MODE(1) |
5459 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
5460 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
5462 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
5463 I915_WRITE(GEN9_PG_ENABLE, 0);
5465 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
5466 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
5468 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5471 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
5473 struct intel_engine_cs *engine;
5474 uint32_t rc6_mask = 0;
5476 /* 1a: Software RC state - RC0 */
5477 I915_WRITE(GEN6_RC_STATE, 0);
5479 /* 1c & 1d: Get forcewake during program sequence. Although the driver
5480 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5481 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5483 /* 2a: Disable RC states. */
5484 I915_WRITE(GEN6_RC_CONTROL, 0);
5486 /* 2b: Program RC6 thresholds.*/
5487 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5488 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5489 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5490 for_each_engine(engine, dev_priv)
5491 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5492 I915_WRITE(GEN6_RC_SLEEP, 0);
5493 if (IS_BROADWELL(dev_priv))
5494 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
5496 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
5499 if (!dev_priv->rps.ctx_corrupted &&
5500 intel_enable_rc6() & INTEL_RC6_ENABLE)
5501 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5502 intel_print_rc6_info(dev_priv, rc6_mask);
5503 if (IS_BROADWELL(dev_priv))
5504 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5505 GEN7_RC_CTL_TO_MODE |
5508 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5509 GEN6_RC_CTL_EI_MODE(1) |
5512 /* 4 Program defaults and thresholds for RPS*/
5513 I915_WRITE(GEN6_RPNSWREQ,
5514 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5515 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5516 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5517 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
5518 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
5520 /* Docs recommend 900MHz, and 300 MHz respectively */
5521 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
5522 dev_priv->rps.max_freq_softlimit << 24 |
5523 dev_priv->rps.min_freq_softlimit << 16);
5525 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
5526 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
5527 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
5528 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
5530 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5533 I915_WRITE(GEN6_RP_CONTROL,
5534 GEN6_RP_MEDIA_TURBO |
5535 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5536 GEN6_RP_MEDIA_IS_GFX |
5538 GEN6_RP_UP_BUSY_AVG |
5539 GEN6_RP_DOWN_IDLE_AVG);
/* 6: Ring frequency + overclocking (our driver does this later) */
5543 reset_rps(dev_priv, gen6_set_rps);
5545 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5548 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5550 struct intel_engine_cs *engine;
5551 u32 rc6vids, rc6_mask = 0;
5556 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5558 /* Here begins a magic sequence of register writes to enable
5559 * auto-downclocking.
5561 * Perhaps there might be some value in exposing these to
5564 I915_WRITE(GEN6_RC_STATE, 0);
5566 /* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}
5573 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5575 /* disable the counters and set deterministic thresholds */
5576 I915_WRITE(GEN6_RC_CONTROL, 0);
5578 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
5579 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
5580 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
5581 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5582 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5584 for_each_engine(engine, dev_priv)
5585 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5587 I915_WRITE(GEN6_RC_SLEEP, 0);
5588 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5589 if (IS_IVYBRIDGE(dev_priv))
5590 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5592 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
5593 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
5594 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5596 /* Check if we are enabling RC6 */
5597 rc6_mode = intel_enable_rc6();
5598 if (rc6_mode & INTEL_RC6_ENABLE)
5599 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5601 /* We don't use those on Haswell */
5602 if (!IS_HASWELL(dev_priv)) {
5603 if (rc6_mode & INTEL_RC6p_ENABLE)
5604 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5606 if (rc6_mode & INTEL_RC6pp_ENABLE)
5607 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5610 intel_print_rc6_info(dev_priv, rc6_mask);
5612 I915_WRITE(GEN6_RC_CONTROL,
5614 GEN6_RC_CTL_EI_MODE(1) |
5615 GEN6_RC_CTL_HW_ENABLE);
5617 /* Power down if completely idle for over 50ms */
5618 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
5619 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
5625 reset_rps(dev_priv, gen6_set_rps);
5628 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5629 if (IS_GEN6(dev_priv) && ret) {
5630 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5631 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5632 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5633 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5634 rc6vids &= 0xffff00;
5635 rc6vids |= GEN6_ENCODE_RC6_VID(450);
5636 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5638 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5641 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5644 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5647 unsigned int gpu_freq;
5648 unsigned int max_ia_freq, min_ring_freq;
5649 unsigned int max_gpu_freq, min_gpu_freq;
5650 int scaling_factor = 180;
5651 struct cpufreq_policy *policy;
5653 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/* Default to measured freq if none found; the PCU will
		 * ensure we don't go over */
		max_ia_freq = tsc_khz;
	}
5667 /* Convert from kHz to MHz */
5668 max_ia_freq /= 1000;
5670 min_ring_freq = I915_READ(DCLK) & 0xf;
5671 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5672 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
5674 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5675 /* Convert GT frequency to 50 HZ units */
5676 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5677 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5679 min_gpu_freq = dev_priv->rps.min_freq;
5680 max_gpu_freq = dev_priv->rps.max_freq;
5684 * For each potential GPU frequency, load a ring frequency we'd like
5685 * to use for memory access. We do this by specifying the IA frequency
5686 * the PCU should use as a reference to determine the ring frequency.
5688 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5689 int diff = max_gpu_freq - gpu_freq;
5690 unsigned int ia_freq = 0, ring_freq = 0;
5692 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5694 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5695 * No floor required for ring frequency on SKL.
5697 ring_freq = gpu_freq;
5698 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
5699 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5700 ring_freq = max(min_ring_freq, gpu_freq);
5701 } else if (IS_HASWELL(dev_priv)) {
5702 ring_freq = mult_frac(gpu_freq, 5, 4);
5703 ring_freq = max(min_ring_freq, ring_freq);
5704 /* leave ia_freq as the default, chosen by cpufreq */
5706 /* On older processors, there is no separate ring
5707 * clock domain, so in order to boost the bandwidth
5708 * of the ring, we need to upclock the CPU (ia_freq).
5710 * For GPU frequencies less than 750MHz,
5711 * just use the lowest ring freq.
5713 if (gpu_freq < min_freq)
5716 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5717 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5720 sandybridge_pcode_write(dev_priv,
5721 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
5722 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
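/*
 * Illustrative sketch (not driver code) of the ring-frequency policy
 * implemented in the loop above: SKL/KBL use the GT frequency as-is,
 * gen8 applies the DDR-derived floor, and HSW scales GT by 5/4 before
 * applying the floor. Values are in the same units the loop uses.
 */
static unsigned int example_pick_ring_freq(unsigned int gpu_freq,
					   unsigned int min_ring_freq,
					   bool is_skl, bool is_gen8,
					   bool is_hsw)
{
	if (is_skl)
		return gpu_freq;
	if (is_gen8)
		return max(min_ring_freq, gpu_freq);
	if (is_hsw)
		return max(min_ring_freq, gpu_freq * 5 / 4);
	return 0; /* older parts upclock the CPU (ia_freq) instead */
}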
5728 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5732 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5734 switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
5736 /* (2 * 4) config */
5737 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5740 /* (2 * 6) config */
5741 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5744 /* (2 * 8) config */
5746 /* Setting (2 * 8) Min RP0 for any other combination */
5747 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5751 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5756 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5760 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5761 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5766 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5770 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5771 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5776 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5780 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5782 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5787 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
5791 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5793 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5795 rp0 = min_t(u32, rp0, 0xea);
5800 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5804 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
5805 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
5806 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
5807 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5812 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
5816 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5818 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
5819 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
5820 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
* a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
5822 * to make sure it matches what Punit accepts.
5824 return max_t(u32, val, 0xc0);
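/*
 * Worked example of the clamp above (illustrative): on a BYT-M B0 the
 * Punit register reads 0xbf, but since the Punit rejects requests
 * below 0xc0, max_t(u32, 0xbf, 0xc0) returns 0xc0 as the usable
 * minimum frequency.
 */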
/* Check that the pctx buffer wasn't moved under us. */
5828 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5830 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5832 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5833 dev_priv->vlv_pctx->stolen->start);
5837 /* Check that the pcbr address is not empty. */
5838 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5840 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5842 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5845 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
5847 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5848 unsigned long pctx_paddr, paddr;
5850 int pctx_size = 32*1024;
5852 pcbr = I915_READ(VLV_PCBR);
5853 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5854 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5855 paddr = (dev_priv->mm.stolen_base +
5856 (ggtt->stolen_size - pctx_size));
5858 pctx_paddr = (paddr & (~4095));
5859 I915_WRITE(VLV_PCBR, pctx_paddr);
5862 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5865 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5867 struct drm_i915_gem_object *pctx;
5868 unsigned long pctx_paddr;
5870 int pctx_size = 24*1024;
5872 pcbr = I915_READ(VLV_PCBR);
5874 /* BIOS set it up already, grab the pre-alloc'd space */
5877 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5878 pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
5880 I915_GTT_OFFSET_NONE,
5885 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5888 * From the Gunit register HAS:
5889 * The Gfx driver is expected to program this register and ensure
5890 * proper allocation within Gfx stolen memory. For example, this
5891 * register should be programmed such than the PCBR range does not
5892 * overlap with other ranges, such as the frame buffer, protected
5893 * memory, or any other relevant ranges.
5895 pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
5897 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5901 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5902 I915_WRITE(VLV_PCBR, pctx_paddr);
5905 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5906 dev_priv->vlv_pctx = pctx;
5909 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
5911 if (WARN_ON(!dev_priv->vlv_pctx))
5914 i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
5915 dev_priv->vlv_pctx = NULL;
5918 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5920 dev_priv->rps.gpll_ref_freq =
5921 vlv_get_cck_clock(dev_priv, "GPLL ref",
5922 CCK_GPLL_CLOCK_CONTROL,
5923 dev_priv->czclk_freq);
5925 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
5926 dev_priv->rps.gpll_ref_freq);
5929 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
5933 valleyview_setup_pctx(dev_priv);
5935 vlv_init_gpll_ref_freq(dev_priv);
5937 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5938 switch ((val >> 6) & 3) {
5941 dev_priv->mem_freq = 800;
5944 dev_priv->mem_freq = 1066;
5947 dev_priv->mem_freq = 1333;
5950 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5952 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5953 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5954 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5955 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5956 dev_priv->rps.max_freq);
5958 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5959 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5960 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5961 dev_priv->rps.efficient_freq);
5963 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5964 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
5965 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5966 dev_priv->rps.rp1_freq);
5968 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5969 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5970 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5971 dev_priv->rps.min_freq);
5974 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
5978 cherryview_setup_pctx(dev_priv);
5980 vlv_init_gpll_ref_freq(dev_priv);
5982 mutex_lock(&dev_priv->sb_lock);
5983 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5984 mutex_unlock(&dev_priv->sb_lock);
5986 switch ((val >> 2) & 0x7) {
5988 dev_priv->mem_freq = 2000;
5991 dev_priv->mem_freq = 1600;
5994 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5996 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5997 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5998 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5999 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
6000 dev_priv->rps.max_freq);
6002 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
6003 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
6004 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
6005 dev_priv->rps.efficient_freq);
6007 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
6008 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
6009 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
6010 dev_priv->rps.rp1_freq);
6012 /* PUnit validated range is only [RPe, RP0] */
6013 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
6014 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
6015 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
6016 dev_priv->rps.min_freq);
6018 WARN_ONCE((dev_priv->rps.max_freq |
6019 dev_priv->rps.efficient_freq |
6020 dev_priv->rps.rp1_freq |
6021 dev_priv->rps.min_freq) & 1,
6022 "Odd GPU freq values\n");
6025 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6027 valleyview_cleanup_pctx(dev_priv);
6030 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
6032 struct intel_engine_cs *engine;
6033 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
6035 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}
6045 cherryview_check_pctx(dev_priv);
6047 /* 1a & 1b: Get forcewake during program sequence. Although the driver
6048 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6049 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6051 /* Disable RC states. */
6052 I915_WRITE(GEN6_RC_CONTROL, 0);
6054 /* 2a: Program RC6 thresholds.*/
6055 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
6056 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6057 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6059 for_each_engine(engine, dev_priv)
6060 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6061 I915_WRITE(GEN6_RC_SLEEP, 0);
6063 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
6064 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
6066 /* allows RC6 residency counter to work */
6067 I915_WRITE(VLV_COUNTER_CONTROL,
6068 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
6069 VLV_MEDIA_RC6_COUNT_EN |
6070 VLV_RENDER_RC6_COUNT_EN));
6072 /* For now we assume BIOS is allocating and populating the PCBR */
6073 pcbr = I915_READ(VLV_PCBR);
6076 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
6077 (pcbr >> VLV_PCBR_ADDR_SHIFT))
6078 rc6_mode = GEN7_RC_CTL_TO_MODE;
6080 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
6082 /* 4 Program defaults and thresholds for RPS*/
6083 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6084 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6085 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6086 I915_WRITE(GEN6_RP_UP_EI, 66000);
6087 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6089 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6092 I915_WRITE(GEN6_RP_CONTROL,
6093 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6094 GEN6_RP_MEDIA_IS_GFX |
6095 GEN6_RP_ENABLE |
6096 GEN6_RP_UP_BUSY_AVG |
6097 GEN6_RP_DOWN_IDLE_AVG);
6099 /* Setting Fixed Bias */
6100 val = VLV_OVERRIDE_EN |
6101 VLV_SOC_TDP_EN |
6102 CHV_BIAS_CPU_50_SOC_50;
6103 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6105 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6107 /* RPS code assumes GPLL is used */
6108 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6110 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6111 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6113 reset_rps(dev_priv, valleyview_set_rps);
6115 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6118 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
6120 struct intel_engine_cs *engine;
6121 u32 gtfifodbg, val, rc6_mode = 0;
6123 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6125 valleyview_check_pctx(dev_priv);
6127 gtfifodbg = I915_READ(GTFIFODBG);
6128 if (gtfifodbg) {
6129 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
6130 gtfifodbg);
6131 I915_WRITE(GTFIFODBG, gtfifodbg);
6132 }
6134 /* If VLV, Forcewake all wells, else re-direct to regular path */
6135 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6137 /* Disable RC states. */
6138 I915_WRITE(GEN6_RC_CONTROL, 0);
6140 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6141 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6142 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6143 I915_WRITE(GEN6_RP_UP_EI, 66000);
6144 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6146 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6148 I915_WRITE(GEN6_RP_CONTROL,
6149 GEN6_RP_MEDIA_TURBO |
6150 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6151 GEN6_RP_MEDIA_IS_GFX |
6152 GEN6_RP_ENABLE |
6153 GEN6_RP_UP_BUSY_AVG |
6154 GEN6_RP_DOWN_IDLE_CONT);
6156 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
6157 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6158 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6160 for_each_engine(engine, dev_priv)
6161 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6163 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
6165 /* allows RC6 residency counter to work */
6166 I915_WRITE(VLV_COUNTER_CONTROL,
6167 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
6168 VLV_RENDER_RC0_COUNT_EN |
6169 VLV_MEDIA_RC6_COUNT_EN |
6170 VLV_RENDER_RC6_COUNT_EN));
6172 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6173 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
6175 intel_print_rc6_info(dev_priv, rc6_mode);
6177 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
6179 /* Setting Fixed Bias */
6180 val = VLV_OVERRIDE_EN |
6181 VLV_SOC_TDP_EN |
6182 VLV_BIAS_CPU_125_SOC_875;
6183 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6185 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6187 /* RPS code assumes GPLL is used */
6188 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6190 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6191 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6193 reset_rps(dev_priv, valleyview_set_rps);
6195 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6198 static unsigned long intel_pxfreq(u32 vidfreq)
6199 {
6200 unsigned long freq;
6201 int div = (vidfreq & 0x3f0000) >> 16;
6202 int post = (vidfreq & 0x3000) >> 12;
6203 int pre = (vidfreq & 0x7);
6205 if (!pre)
6206 return 0;
6208 freq = ((div * 133333) / ((1<<post) * pre));
6210 return freq;
6211 }
6213 static const struct cparams {
6214 u16 i;
6215 u16 t;
6216 u16 m;
6217 u16 c;
6218 } cparams[] = {
6219 { 1, 1333, 301, 28664 },
6220 { 1, 1066, 294, 24460 },
6221 { 1, 800, 294, 25192 },
6222 { 0, 1333, 276, 27605 },
6223 { 0, 1066, 276, 27605 },
6224 { 0, 800, 231, 23784 },
6225 };
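/*
 * The i and t columns are matched against dev_priv->ips.c_m and
 * dev_priv->ips.r_t below; m and c are the slope and intercept of a
 * linear power model. __i915_chipset_val() computes roughly
 * (m * counts_per_ms + c) / 10, so with m = 294, c = 24460 and
 * 100 counts/ms the result is (294 * 100 + 24460) / 10 = 5386 in the
 * driver's units.
 */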
6227 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
6228 {
6229 u64 total_count, diff, ret;
6230 u32 count1, count2, count3, m = 0, c = 0;
6231 unsigned long now = jiffies_to_msecs(jiffies), diff1;
6232 int i;
6234 assert_spin_locked(&mchdev_lock);
6236 diff1 = now - dev_priv->ips.last_time1;
6238 /* Prevent division-by-zero if we are asking too fast.
6239 * Also, we don't get interesting results if we are polling
6240 * faster than once in 10ms, so just return the saved value
6241 * in such cases.
6242 */
6243 if (diff1 <= 10)
6244 return dev_priv->ips.chipset_power;
6246 count1 = I915_READ(DMIEC);
6247 count2 = I915_READ(DDREC);
6248 count3 = I915_READ(CSIEC);
6250 total_count = count1 + count2 + count3;
6252 /* FIXME: handle per-counter overflow */
6253 if (total_count < dev_priv->ips.last_count1) {
6254 diff = ~0UL - dev_priv->ips.last_count1;
6255 diff += total_count;
6257 diff = total_count - dev_priv->ips.last_count1;
6260 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
6261 if (cparams[i].i == dev_priv->ips.c_m &&
6262 cparams[i].t == dev_priv->ips.r_t) {
6263 m = cparams[i].m;
6264 c = cparams[i].c;
6265 break;
6266 }
6267 }
6269 diff = div_u64(diff, diff1);
6270 ret = ((m * diff) + c);
6271 ret = div_u64(ret, 10);
6273 dev_priv->ips.last_count1 = total_count;
6274 dev_priv->ips.last_time1 = now;
6276 dev_priv->ips.chipset_power = ret;
6278 return ret;
6279 }
6281 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
6282 {
6283 unsigned long val;
6285 if (INTEL_INFO(dev_priv)->gen != 5)
6286 return 0;
6288 spin_lock_irq(&mchdev_lock);
6290 val = __i915_chipset_val(dev_priv);
6292 spin_unlock_irq(&mchdev_lock);
6294 return val;
6295 }
6297 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
6298 {
6299 unsigned long m, x, b;
6300 u32 tsfs;
6302 tsfs = I915_READ(TSFS);
6304 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
6305 x = I915_READ8(TR1);
6307 b = tsfs & TSFS_INTR_MASK;
6309 return ((m * x) / 127) - b;
6310 }
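/*
 * Illustrative numbers only: with a slope field m = 100, a TR1 reading
 * x = 50 and an intercept b = 10, the result is (100 * 50) / 127 - 10 = 29
 * in the same (undocumented) units the IPS driver consumes.
 */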
6312 static int _pxvid_to_vd(u8 pxvid)
6313 {
6314 if (pxvid == 0)
6315 return 0;
6317 if (pxvid >= 8 && pxvid < 31)
6318 pxvid = 31;
6320 return (pxvid + 2) * 125;
6321 }
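/*
 * For example, pxvid 31 maps to (31 + 2) * 125 = 4125, and any value in
 * the dead band 8..30 is clamped up to 31 first, so it yields the same
 * 4125.
 */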
6323 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
6324 {
6325 const int vd = _pxvid_to_vd(pxvid);
6326 const int vm = vd - 1125;
6328 if (INTEL_INFO(dev_priv)->is_mobile)
6329 return vm > 0 ? vm : 0;
6331 return vd;
6332 }
6334 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
6335 {
6336 u64 now, diff, diffms;
6337 u32 count;
6339 assert_spin_locked(&mchdev_lock);
6341 now = ktime_get_raw_ns();
6342 diffms = now - dev_priv->ips.last_time2;
6343 do_div(diffms, NSEC_PER_MSEC);
6345 /* Don't divide by 0 */
6346 if (!diffms)
6347 return;
6349 count = I915_READ(GFXEC);
6351 if (count < dev_priv->ips.last_count2) {
6352 diff = ~0UL - dev_priv->ips.last_count2;
6353 diff += count;
6354 } else {
6355 diff = count - dev_priv->ips.last_count2;
6356 }
6358 dev_priv->ips.last_count2 = count;
6359 dev_priv->ips.last_time2 = now;
6361 /* More magic constants... */
6362 diff = diff * 1181;
6363 diff = div_u64(diff, diffms * 10);
6364 dev_priv->ips.gfx_power = diff;
6365 }
6367 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
6368 {
6369 if (INTEL_INFO(dev_priv)->gen != 5)
6370 return;
6372 spin_lock_irq(&mchdev_lock);
6374 __i915_update_gfx_val(dev_priv);
6376 spin_unlock_irq(&mchdev_lock);
6377 }
6379 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
6380 {
6381 unsigned long t, corr, state1, corr2, state2;
6382 u32 pxvid, ext_v;
6384 assert_spin_locked(&mchdev_lock);
6386 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
6387 pxvid = (pxvid >> 24) & 0x7f;
6388 ext_v = pvid_to_extvid(dev_priv, pxvid);
6390 state1 = ext_v;
6392 t = i915_mch_val(dev_priv);
6394 /* Revel in the empirically derived constants */
6396 /* Correction factor in 1/100000 units */
6397 if (t > 80)
6398 corr = ((t * 2349) + 135940);
6399 else if (t >= 50)
6400 corr = ((t * 964) + 29317);
6401 else /* < 50 */
6402 corr = ((t * 301) + 1004);
6404 corr = corr * ((150142 * state1) / 10000 - 78642);
6405 corr /= 100000;
6406 corr2 = (corr * dev_priv->ips.corr);
6408 state2 = (corr2 * state1) / 10000;
6409 state2 /= 100; /* convert to mW */
6411 __i915_update_gfx_val(dev_priv);
6413 return dev_priv->ips.gfx_power + state2;
6414 }
6416 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
6417 {
6418 unsigned long val;
6420 if (INTEL_INFO(dev_priv)->gen != 5)
6421 return 0;
6423 spin_lock_irq(&mchdev_lock);
6425 val = __i915_gfx_val(dev_priv);
6427 spin_unlock_irq(&mchdev_lock);
6429 return val;
6430 }
6432 /**
6433 * i915_read_mch_val - return value for IPS use
6434 *
6435 * Calculate and return a value for the IPS driver to use when deciding whether
6436 * we have thermal and power headroom to increase CPU or GPU power budget.
6437 */
6438 unsigned long i915_read_mch_val(void)
6439 {
6440 struct drm_i915_private *dev_priv;
6441 unsigned long chipset_val, graphics_val, ret = 0;
6443 spin_lock_irq(&mchdev_lock);
6444 if (!i915_mch_dev)
6445 goto out_unlock;
6446 dev_priv = i915_mch_dev;
6448 chipset_val = __i915_chipset_val(dev_priv);
6449 graphics_val = __i915_gfx_val(dev_priv);
6451 ret = chipset_val + graphics_val;
6453 out_unlock:
6454 spin_unlock_irq(&mchdev_lock);
6456 return ret;
6457 }
6458 EXPORT_SYMBOL_GPL(i915_read_mch_val);
6460 /**
6461 * i915_gpu_raise - raise GPU frequency limit
6462 *
6463 * Raise the limit; IPS indicates we have thermal headroom.
6464 */
6465 bool i915_gpu_raise(void)
6466 {
6467 struct drm_i915_private *dev_priv;
6468 bool ret = true;
6470 spin_lock_irq(&mchdev_lock);
6471 if (!i915_mch_dev) {
6472 ret = false;
6473 goto out_unlock;
6474 }
6475 dev_priv = i915_mch_dev;
6477 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
6478 dev_priv->ips.max_delay--;
6480 out_unlock:
6481 spin_unlock_irq(&mchdev_lock);
6483 return ret;
6484 }
6485 EXPORT_SYMBOL_GPL(i915_gpu_raise);
6487 /**
6488 * i915_gpu_lower - lower GPU frequency limit
6489 *
6490 * IPS indicates we're close to a thermal limit, so throttle back the GPU
6491 * frequency maximum.
6492 */
6493 bool i915_gpu_lower(void)
6494 {
6495 struct drm_i915_private *dev_priv;
6496 bool ret = true;
6498 spin_lock_irq(&mchdev_lock);
6499 if (!i915_mch_dev) {
6500 ret = false;
6501 goto out_unlock;
6502 }
6503 dev_priv = i915_mch_dev;
6505 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
6506 dev_priv->ips.max_delay++;
6508 out_unlock:
6509 spin_unlock_irq(&mchdev_lock);
6511 return ret;
6512 }
6513 EXPORT_SYMBOL_GPL(i915_gpu_lower);
6515 /**
6516 * i915_gpu_busy - indicate GPU busyness to IPS
6517 *
6518 * Tell the IPS driver whether or not the GPU is busy.
6519 */
6520 bool i915_gpu_busy(void)
6521 {
6522 bool ret = false;
6524 spin_lock_irq(&mchdev_lock);
6525 if (i915_mch_dev)
6526 ret = i915_mch_dev->gt.awake;
6527 spin_unlock_irq(&mchdev_lock);
6529 return ret;
6530 }
6531 EXPORT_SYMBOL_GPL(i915_gpu_busy);
6533 /**
6534 * i915_gpu_turbo_disable - disable graphics turbo
6535 *
6536 * Disable graphics turbo by resetting the max frequency and setting the
6537 * current frequency to the default.
6538 */
6539 bool i915_gpu_turbo_disable(void)
6540 {
6541 struct drm_i915_private *dev_priv;
6542 bool ret = true;
6544 spin_lock_irq(&mchdev_lock);
6545 if (!i915_mch_dev) {
6546 ret = false;
6547 goto out_unlock;
6548 }
6549 dev_priv = i915_mch_dev;
6551 dev_priv->ips.max_delay = dev_priv->ips.fstart;
6553 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
6554 ret = false;
6556 out_unlock:
6557 spin_unlock_irq(&mchdev_lock);
6559 return ret;
6560 }
6561 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6563 /**
6564 * Tells the intel_ips driver that the i915 driver is now loaded, if
6565 * IPS got loaded first.
6567 * This awkward dance is so that neither module has to depend on the
6568 * other in order for IPS to do the appropriate communication of
6569 * GPU turbo limits to i915.
6570 */
6571 static void
6572 ips_ping_for_i915_load(void)
6573 {
6574 void (*link)(void);
6576 link = symbol_get(ips_link_to_i915_driver);
6577 if (link) {
6578 link();
6579 symbol_put(ips_link_to_i915_driver);
6580 }
6581 }
6583 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6585 /* We only register the i915 ips part with intel-ips once everything is
6586 * set up, to avoid intel-ips sneaking in and reading bogus values. */
6587 spin_lock_irq(&mchdev_lock);
6588 i915_mch_dev = dev_priv;
6589 spin_unlock_irq(&mchdev_lock);
6591 ips_ping_for_i915_load();
6594 void intel_gpu_ips_teardown(void)
6596 spin_lock_irq(&mchdev_lock);
6597 i915_mch_dev = NULL;
6598 spin_unlock_irq(&mchdev_lock);
6601 static void intel_init_emon(struct drm_i915_private *dev_priv)
6602 {
6603 u32 lcfuse;
6604 u8 pxw[16];
6605 int i;
6607 /* Disable to program */
6608 I915_WRITE(ECR, 0);
6609 POSTING_READ(ECR);
6611 /* Program energy weights for various events */
6612 I915_WRITE(SDEW, 0x15040d00);
6613 I915_WRITE(CSIEW0, 0x007f0000);
6614 I915_WRITE(CSIEW1, 0x1e220004);
6615 I915_WRITE(CSIEW2, 0x04000004);
6617 for (i = 0; i < 5; i++)
6618 I915_WRITE(PEW(i), 0);
6619 for (i = 0; i < 3; i++)
6620 I915_WRITE(DEW(i), 0);
6622 /* Program P-state weights to account for frequency power adjustment */
6623 for (i = 0; i < 16; i++) {
6624 u32 pxvidfreq = I915_READ(PXVFREQ(i));
6625 unsigned long freq = intel_pxfreq(pxvidfreq);
6626 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6627 PXVFREQ_PX_SHIFT;
6628 unsigned long val;
6630 val = vid * vid;
6631 val *= (freq / 1000);
6632 val *= 255;
6633 val /= (127*127*900);
6634 if (val > 0xff)
6635 DRM_ERROR("bad pxval: %ld\n", val);
6636 pxw[i] = val;
6637 }
6638 /* Render standby states get 0 weight */
6639 pxw[14] = 0;
6640 pxw[15] = 0;
6642 for (i = 0; i < 4; i++) {
6643 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6644 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6645 I915_WRITE(PXW(i), val);
6646 }
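/*
 * Each PXW register packs four of the byte-sized weights, most
 * significant byte first: e.g. pxw[0..3] = 0x12, 0x34, 0x56, 0x78 is
 * written to PXW(0) as 0x12345678.
 */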
6648 /* Adjust magic regs to magic values (more experimental results) */
6649 I915_WRITE(OGW0, 0);
6650 I915_WRITE(OGW1, 0);
6651 I915_WRITE(EG0, 0x00007f00);
6652 I915_WRITE(EG1, 0x0000000e);
6653 I915_WRITE(EG2, 0x000e0000);
6654 I915_WRITE(EG3, 0x68000300);
6655 I915_WRITE(EG4, 0x42000000);
6656 I915_WRITE(EG5, 0x00140031);
6657 I915_WRITE(EG6, 0);
6658 I915_WRITE(EG7, 0);
6660 for (i = 0; i < 8; i++)
6661 I915_WRITE(PXWL(i), 0);
6663 /* Enable PMON + select events */
6664 I915_WRITE(ECR, 0x80000019);
6666 lcfuse = I915_READ(LCFUSE02);
6668 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6669 }
6671 static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
6672 {
6673 return !I915_READ(GEN8_RC6_CTX_INFO);
6674 }
6676 static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
6677 {
6678 if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
6679 return;
6681 if (i915_rc6_ctx_corrupted(i915)) {
6682 DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
6683 i915->rps.ctx_corrupted = true;
6684 intel_runtime_pm_get(i915);
6685 }
6686 }
6688 static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
6689 {
6690 if (i915->rps.ctx_corrupted) {
6691 intel_runtime_pm_put(i915);
6692 i915->rps.ctx_corrupted = false;
6693 }
6694 }
6696 /**
6697 * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
6698 * @i915: i915 device
6699 *
6700 * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
6701 */
6702 void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
6703 {
6704 if (i915->rps.ctx_corrupted)
6705 intel_runtime_pm_put(i915);
6706 }
6708 /**
6709 * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
6710 * @i915: i915 device
6711 *
6712 * Perform any steps needed to re-init the RC6 CTX WA after system resume.
6713 */
6714 void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
6715 {
6716 if (!i915->rps.ctx_corrupted)
6717 return;
6719 if (i915_rc6_ctx_corrupted(i915)) {
6720 intel_runtime_pm_get(i915);
6721 return;
6722 }
6724 DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
6725 i915->rps.ctx_corrupted = false;
6726 }
6728 static void intel_disable_rc6(struct drm_i915_private *dev_priv);
6730 /**
6731 * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
6732 * @i915: i915 device
6733 *
6734 * Check if an RC6 CTX corruption has happened since the last check and if so
6735 * disable RC6 and runtime power management.
6736 *
6737 * Return false if no context corruption has happened since the last call of
6738 * this function, true otherwise.
6739 */
6740 bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
6741 {
6742 if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
6743 return false;
6745 if (i915->rps.ctx_corrupted)
6746 return false;
6748 if (!i915_rc6_ctx_corrupted(i915))
6749 return false;
6751 DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
6753 intel_disable_rc6(i915);
6754 i915->rps.ctx_corrupted = true;
6755 intel_runtime_pm_get_noresume(i915);
6757 return true;
6758 }
6760 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6761 {
6762 /*
6763 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
6764 * requirement.
6765 */
6766 if (!i915.enable_rc6) {
6767 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
6768 intel_runtime_pm_get(dev_priv);
6769 }
6771 mutex_lock(&dev_priv->drm.struct_mutex);
6772 mutex_lock(&dev_priv->rps.hw_lock);
6774 i915_rc6_ctx_wa_init(dev_priv);
6776 /* Initialize RPS limits (for userspace) */
6777 if (IS_CHERRYVIEW(dev_priv))
6778 cherryview_init_gt_powersave(dev_priv);
6779 else if (IS_VALLEYVIEW(dev_priv))
6780 valleyview_init_gt_powersave(dev_priv);
6781 else if (INTEL_GEN(dev_priv) >= 6)
6782 gen6_init_rps_frequencies(dev_priv);
6784 /* Derive initial user preferences/limits from the hardware limits */
6785 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
6786 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
6788 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
6789 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
6791 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6792 dev_priv->rps.min_freq_softlimit =
6793 max_t(int,
6794 dev_priv->rps.efficient_freq,
6795 intel_freq_opcode(dev_priv, 450));
6797 /* After setting max-softlimit, find the overclock max freq */
6798 if (IS_GEN6(dev_priv) ||
6799 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
6800 u32 params = 0;
6802 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
6803 if (params & BIT(31)) { /* OC supported */
6804 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
6805 (dev_priv->rps.max_freq & 0xff) * 50,
6806 (params & 0xff) * 50);
6807 dev_priv->rps.max_freq = params & 0xff;
6808 }
6809 }
6811 /* Finally allow us to boost to max by default */
6812 dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
6814 mutex_unlock(&dev_priv->rps.hw_lock);
6815 mutex_unlock(&dev_priv->drm.struct_mutex);
6817 intel_autoenable_gt_powersave(dev_priv);
6818 }
6820 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6822 if (IS_VALLEYVIEW(dev_priv))
6823 valleyview_cleanup_gt_powersave(dev_priv);
6825 i915_rc6_ctx_wa_cleanup(dev_priv);
6827 if (!i915.enable_rc6)
6828 intel_runtime_pm_put(dev_priv);
6831 /**
6832 * intel_suspend_gt_powersave - suspend PM work and helper threads
6833 * @dev_priv: i915 device
6834 *
6835 * We don't want to disable RC6 or other features here, we just want
6836 * to make sure any work we've queued has finished and won't bother
6837 * us while we're suspended.
6838 */
6839 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
6840 {
6841 if (INTEL_GEN(dev_priv) < 6)
6842 return;
6844 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
6845 intel_runtime_pm_put(dev_priv);
6847 /* gen6_rps_idle() will be called later to disable interrupts */
6848 }
6850 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
6852 dev_priv->rps.enabled = true; /* force disabling */
6853 intel_disable_gt_powersave(dev_priv);
6855 gen6_reset_rps_interrupts(dev_priv);
6858 static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
6860 if (INTEL_GEN(dev_priv) >= 9)
6861 gen9_disable_rc6(dev_priv);
6862 else if (IS_CHERRYVIEW(dev_priv))
6863 cherryview_disable_rc6(dev_priv);
6864 else if (IS_VALLEYVIEW(dev_priv))
6865 valleyview_disable_rc6(dev_priv);
6866 else if (INTEL_GEN(dev_priv) >= 6)
6867 gen6_disable_rc6(dev_priv);
6870 static void intel_disable_rc6(struct drm_i915_private *dev_priv)
6872 mutex_lock(&dev_priv->rps.hw_lock);
6873 __intel_disable_rc6(dev_priv);
6874 mutex_unlock(&dev_priv->rps.hw_lock);
6877 static void intel_disable_rps(struct drm_i915_private *dev_priv)
6879 if (INTEL_GEN(dev_priv) >= 9)
6880 gen9_disable_rps(dev_priv);
6881 else if (INTEL_GEN(dev_priv) >= 6)
6882 gen6_disable_rps(dev_priv);
6883 else if (IS_IRONLAKE_M(dev_priv))
6884 ironlake_disable_drps(dev_priv);
6887 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6888 {
6889 if (!READ_ONCE(dev_priv->rps.enabled))
6890 return;
6892 mutex_lock(&dev_priv->rps.hw_lock);
6894 __intel_disable_rc6(dev_priv);
6895 intel_disable_rps(dev_priv);
6897 dev_priv->rps.enabled = false;
6899 mutex_unlock(&dev_priv->rps.hw_lock);
6900 }
6902 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6903 {
6904 /* We shouldn't be disabling as we submit, so this should be less
6905 * racy than it appears!
6906 */
6907 if (READ_ONCE(dev_priv->rps.enabled))
6908 return;
6910 /* Powersaving is controlled by the host when inside a VM */
6911 if (intel_vgpu_active(dev_priv))
6912 return;
6914 mutex_lock(&dev_priv->rps.hw_lock);
6916 if (IS_CHERRYVIEW(dev_priv)) {
6917 cherryview_enable_rps(dev_priv);
6918 } else if (IS_VALLEYVIEW(dev_priv)) {
6919 valleyview_enable_rps(dev_priv);
6920 } else if (INTEL_GEN(dev_priv) >= 9) {
6921 gen9_enable_rc6(dev_priv);
6922 gen9_enable_rps(dev_priv);
6923 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
6924 gen6_update_ring_freq(dev_priv);
6925 } else if (IS_BROADWELL(dev_priv)) {
6926 gen8_enable_rps(dev_priv);
6927 gen6_update_ring_freq(dev_priv);
6928 } else if (INTEL_GEN(dev_priv) >= 6) {
6929 gen6_enable_rps(dev_priv);
6930 gen6_update_ring_freq(dev_priv);
6931 } else if (IS_IRONLAKE_M(dev_priv)) {
6932 ironlake_enable_drps(dev_priv);
6933 intel_init_emon(dev_priv);
6934 }
6936 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6937 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6939 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6940 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6942 dev_priv->rps.enabled = true;
6943 mutex_unlock(&dev_priv->rps.hw_lock);
6944 }
6946 static void __intel_autoenable_gt_powersave(struct work_struct *work)
6947 {
6948 struct drm_i915_private *dev_priv =
6949 container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
6950 struct intel_engine_cs *rcs;
6951 struct drm_i915_gem_request *req;
6953 if (READ_ONCE(dev_priv->rps.enabled))
6954 goto out;
6956 rcs = &dev_priv->engine[RCS];
6957 if (rcs->last_context)
6958 goto out;
6960 if (!rcs->init_context)
6961 goto out;
6963 mutex_lock(&dev_priv->drm.struct_mutex);
6965 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
6966 if (IS_ERR(req))
6967 goto unlock;
6969 if (!i915.enable_execlists && i915_switch_context(req) == 0)
6970 rcs->init_context(req);
6972 /* Mark the device busy, calling intel_enable_gt_powersave() */
6973 i915_add_request_no_flush(req);
6975 unlock:
6976 mutex_unlock(&dev_priv->drm.struct_mutex);
6977 out:
6978 intel_runtime_pm_put(dev_priv);
6979 }
6981 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
6982 {
6983 if (READ_ONCE(dev_priv->rps.enabled))
6984 return;
6986 if (IS_IRONLAKE_M(dev_priv)) {
6987 ironlake_enable_drps(dev_priv);
6988 intel_init_emon(dev_priv);
6989 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6991 * PCU communication is slow and this doesn't need to be
6992 * done at any specific time, so do this out of our fast path
6993 * to make resume and init faster.
6995 * We depend on the HW RC6 power context save/restore
6996 * mechanism when entering D3 through runtime PM suspend. So
6997 * disable RPM until RPS/RC6 is properly setup. We can only
6998 * get here via the driver load/system resume/runtime resume
6999 * paths, so the _noresume version is enough (and in case of
7000 * runtime resume it's necessary).
7002 if (queue_delayed_work(dev_priv->wq,
7003 &dev_priv->rps.autoenable_work,
7004 round_jiffies_up_relative(HZ)))
7005 intel_runtime_pm_get_noresume(dev_priv);
7006 }
7007 }
7009 static void ibx_init_clock_gating(struct drm_device *dev)
7011 struct drm_i915_private *dev_priv = to_i915(dev);
7013 /*
7014 * On Ibex Peak and Cougar Point, we need to disable clock
7015 * gating for the panel power sequencer or it will fail to
7016 * start up when no ports are active.
7017 */
7018 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
7021 static void g4x_disable_trickle_feed(struct drm_device *dev)
7023 struct drm_i915_private *dev_priv = to_i915(dev);
7024 enum pipe pipe;
7026 for_each_pipe(dev_priv, pipe) {
7027 I915_WRITE(DSPCNTR(pipe),
7028 I915_READ(DSPCNTR(pipe)) |
7029 DISPPLANE_TRICKLE_FEED_DISABLE);
7031 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
7032 POSTING_READ(DSPSURF(pipe));
7033 }
7034 }
7036 static void ilk_init_lp_watermarks(struct drm_device *dev)
7038 struct drm_i915_private *dev_priv = to_i915(dev);
7040 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
7041 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
7042 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
7044 /*
7045 * Don't touch WM1S_LP_EN here.
7046 * Doing so could cause underruns.
7047 */
7048 }
7050 static void ironlake_init_clock_gating(struct drm_device *dev)
7052 struct drm_i915_private *dev_priv = to_i915(dev);
7053 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7055 /*
7056 * Required for FBC
7057 * WaFbcDisableDpfcClockGating:ilk
7058 */
7059 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
7060 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
7061 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
7063 I915_WRITE(PCH_3DCGDIS0,
7064 MARIUNIT_CLOCK_GATE_DISABLE |
7065 SVSMUNIT_CLOCK_GATE_DISABLE);
7066 I915_WRITE(PCH_3DCGDIS1,
7067 VFMUNIT_CLOCK_GATE_DISABLE);
7069 /*
7070 * According to the spec the following bits should be set in
7071 * order to enable memory self-refresh
7072 * The bit 22/21 of 0x42004
7073 * The bit 5 of 0x42020
7074 * The bit 15 of 0x45000
7075 */
7076 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7077 (I915_READ(ILK_DISPLAY_CHICKEN2) |
7078 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
7079 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
7080 I915_WRITE(DISP_ARB_CTL,
7081 (I915_READ(DISP_ARB_CTL) |
7082 DISP_FBC_WM_DIS));
7084 ilk_init_lp_watermarks(dev);
7086 /*
7087 * Based on the document from hardware guys the following bits
7088 * should be set unconditionally in order to enable FBC.
7089 * The bit 22 of 0x42000
7090 * The bit 22 of 0x42004
7091 * The bit 7,8,9 of 0x42020.
7092 */
7093 if (IS_IRONLAKE_M(dev)) {
7094 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
7095 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7096 I915_READ(ILK_DISPLAY_CHICKEN1) |
7097 ILK_FBCQ_DIS);
7098 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7099 I915_READ(ILK_DISPLAY_CHICKEN2) |
7100 ILK_DPARB_GATE);
7101 }
7103 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
7105 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7106 I915_READ(ILK_DISPLAY_CHICKEN2) |
7107 ILK_ELPIN_409_SELECT);
7108 I915_WRITE(_3D_CHICKEN2,
7109 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
7110 _3D_CHICKEN2_WM_READ_PIPELINED);
7112 /* WaDisableRenderCachePipelinedFlush:ilk */
7113 I915_WRITE(CACHE_MODE_0,
7114 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7116 /* WaDisable_RenderCache_OperationalFlush:ilk */
7117 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7119 g4x_disable_trickle_feed(dev);
7121 ibx_init_clock_gating(dev);
7124 static void cpt_init_clock_gating(struct drm_device *dev)
7126 struct drm_i915_private *dev_priv = to_i915(dev);
7127 int pipe;
7128 uint32_t val;
7130 /*
7131 * On Ibex Peak and Cougar Point, we need to disable clock
7132 * gating for the panel power sequencer or it will fail to
7133 * start up when no ports are active.
7134 */
7135 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
7136 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
7137 PCH_CPUNIT_CLOCK_GATE_DISABLE);
7138 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
7139 DPLS_EDP_PPS_FIX_DIS);
7140 /* The below fixes the weird display corruption, a few pixels shifted
7141 * downward, on (only) LVDS of some HP laptops with IVY.
7142 */
7143 for_each_pipe(dev_priv, pipe) {
7144 val = I915_READ(TRANS_CHICKEN2(pipe));
7145 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
7146 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7147 if (dev_priv->vbt.fdi_rx_polarity_inverted)
7148 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7149 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
7150 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
7151 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
7152 I915_WRITE(TRANS_CHICKEN2(pipe), val);
7153 }
7154 /* WADP0ClockGatingDisable */
7155 for_each_pipe(dev_priv, pipe) {
7156 I915_WRITE(TRANS_CHICKEN1(pipe),
7157 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7158 }
7159 }
7161 static void gen6_check_mch_setup(struct drm_device *dev)
7162 {
7163 struct drm_i915_private *dev_priv = to_i915(dev);
7164 uint32_t tmp;
7166 tmp = I915_READ(MCH_SSKPD);
7167 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
7168 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
7172 static void gen6_init_clock_gating(struct drm_device *dev)
7174 struct drm_i915_private *dev_priv = to_i915(dev);
7175 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7177 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
7179 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7180 I915_READ(ILK_DISPLAY_CHICKEN2) |
7181 ILK_ELPIN_409_SELECT);
7183 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
7184 I915_WRITE(_3D_CHICKEN,
7185 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
7187 /* WaDisable_RenderCache_OperationalFlush:snb */
7188 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7190 /*
7191 * BSpec recommends 8x4 when MSAA is used,
7192 * however in practice 16x4 seems fastest.
7193 *
7194 * Note that PS/WM thread counts depend on the WIZ hashing
7195 * disable bit, which we don't touch here, but it's good
7196 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7197 */
7198 I915_WRITE(GEN6_GT_MODE,
7199 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7201 ilk_init_lp_watermarks(dev);
7203 I915_WRITE(CACHE_MODE_0,
7204 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
7206 I915_WRITE(GEN6_UCGCTL1,
7207 I915_READ(GEN6_UCGCTL1) |
7208 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
7209 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7211 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
7212 * gating disable must be set. Failure to set it results in
7213 * flickering pixels due to Z write ordering failures after
7214 * some amount of runtime in the Mesa "fire" demo, and Unigine
7215 * Sanctuary and Tropics, and apparently anything else with
7216 * alpha test or pixel discard.
7218 * According to the spec, bit 11 (RCCUNIT) must also be set,
7219 * but we didn't debug actual testcases to find it out.
7221 * WaDisableRCCUnitClockGating:snb
7222 * WaDisableRCPBUnitClockGating:snb
7223 */
7224 I915_WRITE(GEN6_UCGCTL2,
7225 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
7226 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
7228 /* WaStripsFansDisableFastClipPerformanceFix:snb */
7229 I915_WRITE(_3D_CHICKEN3,
7230 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
7234 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
7235 * 3DSTATE_SF number of SF output attributes is more than 16."
7237 I915_WRITE(_3D_CHICKEN3,
7238 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
7240 /*
7241 * According to the spec the following bits should be
7242 * set in order to enable memory self-refresh and fbc:
7243 * The bit21 and bit22 of 0x42000
7244 * The bit21 and bit22 of 0x42004
7245 * The bit5 and bit7 of 0x42020
7246 * The bit14 of 0x70180
7247 * The bit14 of 0x71180
7248 *
7249 * WaFbcAsynchFlipDisableFbcQueue:snb
7250 */
7251 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7252 I915_READ(ILK_DISPLAY_CHICKEN1) |
7253 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7254 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7255 I915_READ(ILK_DISPLAY_CHICKEN2) |
7256 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7257 I915_WRITE(ILK_DSPCLK_GATE_D,
7258 I915_READ(ILK_DSPCLK_GATE_D) |
7259 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
7260 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
7262 g4x_disable_trickle_feed(dev);
7264 cpt_init_clock_gating(dev);
7266 gen6_check_mch_setup(dev);
7269 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
7270 {
7271 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
7273 /*
7274 * WaVSThreadDispatchOverride:ivb,vlv
7276 * This actually overrides the dispatch
7277 * mode for all thread types.
7278 */
7279 reg &= ~GEN7_FF_SCHED_MASK;
7280 reg |= GEN7_FF_TS_SCHED_HW;
7281 reg |= GEN7_FF_VS_SCHED_HW;
7282 reg |= GEN7_FF_DS_SCHED_HW;
7284 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
7285 }
7287 static void lpt_init_clock_gating(struct drm_device *dev)
7289 struct drm_i915_private *dev_priv = to_i915(dev);
7291 /*
7292 * TODO: this bit should only be enabled when really needed, then
7293 * disabled when not needed anymore in order to save power.
7294 */
7295 if (HAS_PCH_LPT_LP(dev))
7296 I915_WRITE(SOUTH_DSPCLK_GATE_D,
7297 I915_READ(SOUTH_DSPCLK_GATE_D) |
7298 PCH_LP_PARTITION_LEVEL_DISABLE);
7300 /* WADPOClockGatingDisable:hsw */
7301 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
7302 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
7303 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7306 static void lpt_suspend_hw(struct drm_device *dev)
7308 struct drm_i915_private *dev_priv = to_i915(dev);
7310 if (HAS_PCH_LPT_LP(dev)) {
7311 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
7313 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7314 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7318 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7319 int general_prio_credits,
7320 int high_prio_credits)
7321 {
7322 u32 misccpctl;
7324 /* WaTempDisableDOPClkGating:bdw */
7325 misccpctl = I915_READ(GEN7_MISCCPCTL);
7326 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7328 I915_WRITE(GEN8_L3SQCREG1,
7329 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
7330 L3_HIGH_PRIO_CREDITS(high_prio_credits));
7332 /*
7333 * Wait at least 100 clocks before re-enabling clock gating.
7334 * See the definition of L3SQCREG1 in BSpec.
7335 */
7336 POSTING_READ(GEN8_L3SQCREG1);
7338 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
7339 }
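/*
 * The per-platform callers below pick the credit split: Broadwell
 * programs 30 general / 2 high priority credits and Cherryview 38 / 2
 * (see broadwell_init_clock_gating() and cherryview_init_clock_gating()).
 */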
7341 static void kabylake_init_clock_gating(struct drm_device *dev)
7343 struct drm_i915_private *dev_priv = dev->dev_private;
7345 gen9_init_clock_gating(dev);
7347 /* WaDisableSDEUnitClockGating:kbl */
7348 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7349 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7350 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7352 /* WaDisableGamClockGating:kbl */
7353 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7354 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7355 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
7357 /* WaFbcNukeOnHostModify:kbl */
7358 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7359 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7362 static void skylake_init_clock_gating(struct drm_device *dev)
7364 struct drm_i915_private *dev_priv = dev->dev_private;
7366 gen9_init_clock_gating(dev);
7368 /* WAC6entrylatency:skl */
7369 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
7370 FBC_LLC_FULLY_OPEN);
7372 /* WaFbcNukeOnHostModify:skl */
7373 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7374 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7377 static void broadwell_init_clock_gating(struct drm_device *dev)
7379 struct drm_i915_private *dev_priv = to_i915(dev);
7382 ilk_init_lp_watermarks(dev);
7384 /* WaSwitchSolVfFArbitrationPriority:bdw */
7385 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7387 /* WaPsrDPAMaskVBlankInSRD:bdw */
7388 I915_WRITE(CHICKEN_PAR1_1,
7389 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
7391 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
7392 for_each_pipe(dev_priv, pipe) {
7393 I915_WRITE(CHICKEN_PIPESL_1(pipe),
7394 I915_READ(CHICKEN_PIPESL_1(pipe)) |
7395 BDW_DPRS_MASK_VBLANK_SRD);
7396 }
7398 /* WaVSRefCountFullforceMissDisable:bdw */
7399 /* WaDSRefCountFullforceMissDisable:bdw */
7400 I915_WRITE(GEN7_FF_THREAD_MODE,
7401 I915_READ(GEN7_FF_THREAD_MODE) &
7402 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7404 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7405 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7407 /* WaDisableSDEUnitClockGating:bdw */
7408 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7409 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7411 /* WaProgramL3SqcReg1Default:bdw */
7412 gen8_set_l3sqc_credits(dev_priv, 30, 2);
7414 /*
7415 * WaGttCachingOffByDefault:bdw
7416 * GTT cache may not work with big pages, so if those
7417 * are ever enabled GTT cache may need to be disabled.
7418 */
7419 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7421 /* WaKVMNotificationOnConfigChange:bdw */
7422 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
7423 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7425 lpt_init_clock_gating(dev);
7428 static void haswell_init_clock_gating(struct drm_device *dev)
7430 struct drm_i915_private *dev_priv = to_i915(dev);
7432 ilk_init_lp_watermarks(dev);
7434 /* L3 caching of data atomics doesn't work -- disable it. */
7435 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
7436 I915_WRITE(HSW_ROW_CHICKEN3,
7437 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
7439 /* This is required by WaCatErrorRejectionIssue:hsw */
7440 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7441 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7442 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7444 /* WaVSRefCountFullforceMissDisable:hsw */
7445 I915_WRITE(GEN7_FF_THREAD_MODE,
7446 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
7448 /* WaDisable_RenderCache_OperationalFlush:hsw */
7449 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7451 /* enable HiZ Raw Stall Optimization */
7452 I915_WRITE(CACHE_MODE_0_GEN7,
7453 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7455 /* WaDisable4x2SubspanOptimization:hsw */
7456 I915_WRITE(CACHE_MODE_1,
7457 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7459 /*
7460 * BSpec recommends 8x4 when MSAA is used,
7461 * however in practice 16x4 seems fastest.
7462 *
7463 * Note that PS/WM thread counts depend on the WIZ hashing
7464 * disable bit, which we don't touch here, but it's good
7465 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7466 */
7467 I915_WRITE(GEN7_GT_MODE,
7468 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7470 /* WaSampleCChickenBitEnable:hsw */
7471 I915_WRITE(HALF_SLICE_CHICKEN3,
7472 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
7474 /* WaSwitchSolVfFArbitrationPriority:hsw */
7475 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7477 /* WaRsPkgCStateDisplayPMReq:hsw */
7478 I915_WRITE(CHICKEN_PAR1_1,
7479 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
7481 lpt_init_clock_gating(dev);
7484 static void ivybridge_init_clock_gating(struct drm_device *dev)
7485 {
7486 struct drm_i915_private *dev_priv = to_i915(dev);
7487 uint32_t snpcr;
7489 ilk_init_lp_watermarks(dev);
7491 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7493 /* WaDisableEarlyCull:ivb */
7494 I915_WRITE(_3D_CHICKEN3,
7495 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7497 /* WaDisableBackToBackFlipFix:ivb */
7498 I915_WRITE(IVB_CHICKEN3,
7499 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7500 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7502 /* WaDisablePSDDualDispatchEnable:ivb */
7503 if (IS_IVB_GT1(dev))
7504 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7505 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7507 /* WaDisable_RenderCache_OperationalFlush:ivb */
7508 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7510 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
7511 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
7512 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
7514 /* WaApplyL3ControlAndL3ChickenMode:ivb */
7515 I915_WRITE(GEN7_L3CNTLREG1,
7516 GEN7_WA_FOR_GEN7_L3_CONTROL);
7517 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
7518 GEN7_WA_L3_CHICKEN_MODE);
7519 if (IS_IVB_GT1(dev))
7520 I915_WRITE(GEN7_ROW_CHICKEN2,
7521 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7522 else {
7523 /* must write both registers */
7524 I915_WRITE(GEN7_ROW_CHICKEN2,
7525 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7526 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
7527 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7528 }
7530 /* WaForceL3Serialization:ivb */
7531 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7532 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7534 /*
7535 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7536 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
7537 */
7538 I915_WRITE(GEN6_UCGCTL2,
7539 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7541 /* This is required by WaCatErrorRejectionIssue:ivb */
7542 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7543 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7544 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7546 g4x_disable_trickle_feed(dev);
7548 gen7_setup_fixed_func_scheduler(dev_priv);
7550 if (0) { /* causes HiZ corruption on ivb:gt1 */
7551 /* enable HiZ Raw Stall Optimization */
7552 I915_WRITE(CACHE_MODE_0_GEN7,
7553 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7554 }
7556 /* WaDisable4x2SubspanOptimization:ivb */
7557 I915_WRITE(CACHE_MODE_1,
7558 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7560 /*
7561 * BSpec recommends 8x4 when MSAA is used,
7562 * however in practice 16x4 seems fastest.
7563 *
7564 * Note that PS/WM thread counts depend on the WIZ hashing
7565 * disable bit, which we don't touch here, but it's good
7566 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7567 */
7568 I915_WRITE(GEN7_GT_MODE,
7569 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7571 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
7572 snpcr &= ~GEN6_MBC_SNPCR_MASK;
7573 snpcr |= GEN6_MBC_SNPCR_MED;
7574 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
7576 if (!HAS_PCH_NOP(dev))
7577 cpt_init_clock_gating(dev);
7579 gen6_check_mch_setup(dev);
7582 static void valleyview_init_clock_gating(struct drm_device *dev)
7584 struct drm_i915_private *dev_priv = to_i915(dev);
7586 /* WaDisableEarlyCull:vlv */
7587 I915_WRITE(_3D_CHICKEN3,
7588 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7590 /* WaDisableBackToBackFlipFix:vlv */
7591 I915_WRITE(IVB_CHICKEN3,
7592 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7593 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7595 /* WaPsdDispatchEnable:vlv */
7596 /* WaDisablePSDDualDispatchEnable:vlv */
7597 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7598 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
7599 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7601 /* WaDisable_RenderCache_OperationalFlush:vlv */
7602 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7604 /* WaForceL3Serialization:vlv */
7605 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7606 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7608 /* WaDisableDopClockGating:vlv */
7609 I915_WRITE(GEN7_ROW_CHICKEN2,
7610 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7612 /* This is required by WaCatErrorRejectionIssue:vlv */
7613 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7614 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7615 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7617 gen7_setup_fixed_func_scheduler(dev_priv);
7619 /*
7620 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7621 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
7622 */
7623 I915_WRITE(GEN6_UCGCTL2,
7624 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7626 /* WaDisableL3Bank2xClockGate:vlv
7627 * Disabling L3 clock gating- MMIO 940c[25] = 1
7628 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
7629 I915_WRITE(GEN7_UCGCTL4,
7630 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
7632 /*
7633 * BSpec says this must be set, even though
7634 * WaDisable4x2SubspanOptimization isn't listed for VLV.
7635 */
7636 I915_WRITE(CACHE_MODE_1,
7637 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7639 /*
7640 * BSpec recommends 8x4 when MSAA is used,
7641 * however in practice 16x4 seems fastest.
7642 *
7643 * Note that PS/WM thread counts depend on the WIZ hashing
7644 * disable bit, which we don't touch here, but it's good
7645 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7646 */
7647 I915_WRITE(GEN7_GT_MODE,
7648 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7650 /*
7651 * WaIncreaseL3CreditsForVLVB0:vlv
7652 * This is the hardware default actually.
7653 */
7654 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
7656 /*
7657 * WaDisableVLVClockGating_VBIIssue:vlv
7658 * Disable clock gating on the GCFG unit to prevent a delay
7659 * in the reporting of vblank events.
7660 */
7661 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7664 static void cherryview_init_clock_gating(struct drm_device *dev)
7666 struct drm_i915_private *dev_priv = to_i915(dev);
7668 /* WaVSRefCountFullforceMissDisable:chv */
7669 /* WaDSRefCountFullforceMissDisable:chv */
7670 I915_WRITE(GEN7_FF_THREAD_MODE,
7671 I915_READ(GEN7_FF_THREAD_MODE) &
7672 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7674 /* WaDisableSemaphoreAndSyncFlipWait:chv */
7675 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7676 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7678 /* WaDisableCSUnitClockGating:chv */
7679 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7680 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7682 /* WaDisableSDEUnitClockGating:chv */
7683 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7684 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7686 /*
7687 * WaProgramL3SqcReg1Default:chv
7688 * See gfxspecs/Related Documents/Performance Guide/
7689 * LSQC Setting Recommendations.
7690 */
7691 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7693 /*
7694 * GTT cache may not work with big pages, so if those
7695 * are ever enabled GTT cache may need to be disabled.
7696 */
7697 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7700 static void g4x_init_clock_gating(struct drm_device *dev)
7702 struct drm_i915_private *dev_priv = to_i915(dev);
7703 uint32_t dspclk_gate;
7705 I915_WRITE(RENCLK_GATE_D1, 0);
7706 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7707 GS_UNIT_CLOCK_GATE_DISABLE |
7708 CL_UNIT_CLOCK_GATE_DISABLE);
7709 I915_WRITE(RAMCLK_GATE_D, 0);
7710 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7711 OVRUNIT_CLOCK_GATE_DISABLE |
7712 OVCUNIT_CLOCK_GATE_DISABLE;
7713 if (IS_GM45(dev))
7714 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7715 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7717 /* WaDisableRenderCachePipelinedFlush */
7718 I915_WRITE(CACHE_MODE_0,
7719 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7721 /* WaDisable_RenderCache_OperationalFlush:g4x */
7722 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7724 g4x_disable_trickle_feed(dev);
7727 static void crestline_init_clock_gating(struct drm_device *dev)
7729 struct drm_i915_private *dev_priv = to_i915(dev);
7731 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7732 I915_WRITE(RENCLK_GATE_D2, 0);
7733 I915_WRITE(DSPCLK_GATE_D, 0);
7734 I915_WRITE(RAMCLK_GATE_D, 0);
7735 I915_WRITE16(DEUC, 0);
7736 I915_WRITE(MI_ARB_STATE,
7737 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7739 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7740 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7743 static void broadwater_init_clock_gating(struct drm_device *dev)
7745 struct drm_i915_private *dev_priv = to_i915(dev);
7747 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7748 I965_RCC_CLOCK_GATE_DISABLE |
7749 I965_RCPB_CLOCK_GATE_DISABLE |
7750 I965_ISC_CLOCK_GATE_DISABLE |
7751 I965_FBC_CLOCK_GATE_DISABLE);
7752 I915_WRITE(RENCLK_GATE_D2, 0);
7753 I915_WRITE(MI_ARB_STATE,
7754 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7756 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7757 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7760 static void gen3_init_clock_gating(struct drm_device *dev)
7762 struct drm_i915_private *dev_priv = to_i915(dev);
7763 u32 dstate = I915_READ(D_STATE);
7765 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7766 DSTATE_DOT_CLOCK_GATING;
7767 I915_WRITE(D_STATE, dstate);
7769 if (IS_PINEVIEW(dev))
7770 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7772 /* IIR "flip pending" means done if this bit is set */
7773 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7775 /* interrupts should cause a wake up from C3 */
7776 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7778 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7779 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7781 I915_WRITE(MI_ARB_STATE,
7782 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7785 static void i85x_init_clock_gating(struct drm_device *dev)
7787 struct drm_i915_private *dev_priv = to_i915(dev);
7789 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7791 /* interrupts should cause a wake up from C3 */
7792 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7793 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7795 I915_WRITE(MEM_MODE,
7796 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7799 static void i830_init_clock_gating(struct drm_device *dev)
7801 struct drm_i915_private *dev_priv = to_i915(dev);
7803 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
7805 I915_WRITE(MEM_MODE,
7806 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7807 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7810 void intel_init_clock_gating(struct drm_device *dev)
7812 struct drm_i915_private *dev_priv = to_i915(dev);
7814 dev_priv->display.init_clock_gating(dev);
7817 void intel_suspend_hw(struct drm_device *dev)
7819 if (HAS_PCH_LPT(dev))
7820 lpt_suspend_hw(dev);
7823 static void nop_init_clock_gating(struct drm_device *dev)
7825 DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
7826 }
7828 /**
7829 * intel_init_clock_gating_hooks - setup the clock gating hooks
7830 * @dev_priv: device private
7831 *
7832 * Setup the hooks that configure which clocks of a given platform can be
7833 * gated and also apply various GT and display specific workarounds for these
7834 * platforms. Note that some GT specific workarounds are applied separately
7835 * when GPU contexts or batchbuffers start their execution.
7836 */
7837 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7839 if (IS_SKYLAKE(dev_priv))
7840 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7841 else if (IS_KABYLAKE(dev_priv))
7842 dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
7843 else if (IS_BROXTON(dev_priv))
7844 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7845 else if (IS_BROADWELL(dev_priv))
7846 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
7847 else if (IS_CHERRYVIEW(dev_priv))
7848 dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
7849 else if (IS_HASWELL(dev_priv))
7850 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
7851 else if (IS_IVYBRIDGE(dev_priv))
7852 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7853 else if (IS_VALLEYVIEW(dev_priv))
7854 dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
7855 else if (IS_GEN6(dev_priv))
7856 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7857 else if (IS_GEN5(dev_priv))
7858 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7859 else if (IS_G4X(dev_priv))
7860 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7861 else if (IS_CRESTLINE(dev_priv))
7862 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7863 else if (IS_BROADWATER(dev_priv))
7864 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7865 else if (IS_GEN3(dev_priv))
7866 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7867 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
7868 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7869 else if (IS_GEN2(dev_priv))
7870 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7871 else {
7872 MISSING_CASE(INTEL_DEVID(dev_priv));
7873 dev_priv->display.init_clock_gating = nop_init_clock_gating;
7874 }
7875 }
7877 /* Set up chip specific power management-related functions */
7878 void intel_init_pm(struct drm_device *dev)
7880 struct drm_i915_private *dev_priv = to_i915(dev);
7882 intel_fbc_init(dev_priv);
7885 if (IS_PINEVIEW(dev))
7886 i915_pineview_get_mem_freq(dev);
7887 else if (IS_GEN5(dev))
7888 i915_ironlake_get_mem_freq(dev);
7890 /* For FIFO watermark updates */
7891 if (INTEL_INFO(dev)->gen >= 9) {
7892 skl_setup_wm_latency(dev);
7893 dev_priv->display.update_wm = skl_update_wm;
7894 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7895 } else if (HAS_PCH_SPLIT(dev)) {
7896 ilk_setup_wm_latency(dev);
7898 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
7899 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7900 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
7901 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7902 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7903 dev_priv->display.compute_intermediate_wm =
7904 ilk_compute_intermediate_wm;
7905 dev_priv->display.initial_watermarks =
7906 ilk_initial_watermarks;
7907 dev_priv->display.optimize_watermarks =
7908 ilk_optimize_watermarks;
7910 DRM_DEBUG_KMS("Failed to read display plane latency. "
7913 } else if (IS_CHERRYVIEW(dev)) {
7914 vlv_setup_wm_latency(dev);
7915 dev_priv->display.update_wm = vlv_update_wm;
7916 } else if (IS_VALLEYVIEW(dev)) {
7917 vlv_setup_wm_latency(dev);
7918 dev_priv->display.update_wm = vlv_update_wm;
7919 } else if (IS_PINEVIEW(dev)) {
7920 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
7921 dev_priv->is_ddr3,
7922 dev_priv->fsb_freq,
7923 dev_priv->mem_freq)) {
7924 DRM_INFO("failed to find known CxSR latency "
7925 "(found ddr%s fsb freq %d, mem freq %d), "
7926 "disabling CxSR\n",
7927 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7928 dev_priv->fsb_freq, dev_priv->mem_freq);
7929 /* Disable CxSR and never update its watermark again */
7930 intel_set_memory_cxsr(dev_priv, false);
7931 dev_priv->display.update_wm = NULL;
7932 } else
7933 dev_priv->display.update_wm = pineview_update_wm;
7934 } else if (IS_G4X(dev)) {
7935 dev_priv->display.update_wm = g4x_update_wm;
7936 } else if (IS_GEN4(dev)) {
7937 dev_priv->display.update_wm = i965_update_wm;
7938 } else if (IS_GEN3(dev)) {
7939 dev_priv->display.update_wm = i9xx_update_wm;
7940 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7941 } else if (IS_GEN2(dev)) {
7942 if (INTEL_INFO(dev)->num_pipes == 1) {
7943 dev_priv->display.update_wm = i845_update_wm;
7944 dev_priv->display.get_fifo_size = i845_get_fifo_size;
7945 } else {
7946 dev_priv->display.update_wm = i9xx_update_wm;
7947 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7948 }
7949 } else {
7950 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
7951 }
7952 }
7954 static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
7955 {
7956 uint32_t flags =
7957 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7959 switch (flags) {
7960 case GEN6_PCODE_SUCCESS:
7961 return 0;
7962 case GEN6_PCODE_UNIMPLEMENTED_CMD:
7963 case GEN6_PCODE_ILLEGAL_CMD:
7964 return -ENXIO;
7965 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7966 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7967 return -EOVERFLOW;
7968 case GEN6_PCODE_TIMEOUT:
7969 return -ETIMEDOUT;
7970 default:
7971 MISSING_CASE(flags);
7972 return 0;
7973 }
7974 }
7976 static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
7977 {
7978 uint32_t flags =
7979 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7981 switch (flags) {
7982 case GEN6_PCODE_SUCCESS:
7983 return 0;
7984 case GEN6_PCODE_ILLEGAL_CMD:
7985 return -ENXIO;
7986 case GEN7_PCODE_TIMEOUT:
7987 return -ETIMEDOUT;
7988 case GEN7_PCODE_ILLEGAL_DATA:
7989 return -EINVAL;
7990 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7991 return -EOVERFLOW;
7992 default:
7993 MISSING_CASE(flags);
7994 return 0;
7995 }
7996 }
7998 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7999 {
8000 int status;
8002 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
8004 /* GEN6_PCODE_* are outside of the forcewake domain, we can
8005 * use the fw I915_READ variants to reduce the amount of work
8006 * required when reading/writing.
8007 */
8009 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
8010 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
8011 return -EAGAIN;
8012 }
8014 I915_WRITE_FW(GEN6_PCODE_DATA, *val);
8015 I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
8016 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
8018 if (intel_wait_for_register_fw(dev_priv,
8019 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
8020 500)) {
8021 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
8022 return -ETIMEDOUT;
8023 }
8025 *val = I915_READ_FW(GEN6_PCODE_DATA);
8026 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
8028 if (INTEL_GEN(dev_priv) > 6)
8029 status = gen7_check_mailbox_status(dev_priv);
8030 else
8031 status = gen6_check_mailbox_status(dev_priv);
8033 if (status) {
8034 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
8035 status);
8036 return status;
8037 }
8039 return 0;
8040 }
8042 int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
8043 u32 mbox, u32 val, int timeout_us)
8044 {
8045 int status;
8047 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
8049 /* GEN6_PCODE_* are outside of the forcewake domain, we can
8050 * use the fw I915_READ variants to reduce the amount of work
8051 * required when reading/writing.
8052 */
8054 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
8055 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
8056 return -EAGAIN;
8057 }
8059 I915_WRITE_FW(GEN6_PCODE_DATA, val);
8060 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
8062 if (intel_wait_for_register_fw(dev_priv,
8063 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
8064 timeout_us)) {
8065 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
8066 return -ETIMEDOUT;
8067 }
8069 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
8071 if (INTEL_GEN(dev_priv) > 6)
8072 status = gen7_check_mailbox_status(dev_priv);
8073 else
8074 status = gen6_check_mailbox_status(dev_priv);
8076 if (status) {
8077 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
8078 status);
8079 return status;
8080 }
8082 return 0;
8083 }
8085 static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
8086 u32 request, u32 reply_mask, u32 reply,
8087 u32 *status)
8088 {
8089 u32 val = request;
8091 *status = sandybridge_pcode_read(dev_priv, mbox, &val);
8093 return *status || ((val & reply_mask) == reply);
8094 }
8096 /**
8097 * skl_pcode_request - send PCODE request until acknowledgment
8098 * @dev_priv: device private
8099 * @mbox: PCODE mailbox ID the request is targeted for
8100 * @request: request ID
8101 * @reply_mask: mask used to check for request acknowledgment
8102 * @reply: value used to check for request acknowledgment
8103 * @timeout_base_ms: timeout for polling with preemption enabled
8105 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
8106 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
8107 * The request is acknowledged once the PCODE reply dword equals @reply after
8108 * applying @reply_mask. Polling is first attempted with preemption enabled
8109 * for @timeout_base_ms and if this times out for another 50 ms with
8110 * preemption disabled.
8112 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
8113 * other error as reported by PCODE.
8114 */
8115 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
8116 u32 reply_mask, u32 reply, int timeout_base_ms)
8117 {
8118 u32 status;
8119 int ret;
8121 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
8123 #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
8124 &status)
8126 /*
8127 * Prime the PCODE by doing a request first. Normally it guarantees
8128 * that a subsequent request, at most @timeout_base_ms later, succeeds.
8129 * _wait_for() doesn't guarantee when its passed condition is evaluated
8130 * first, so send the first request explicitly.
8131 */
8132 if (COND) {
8133 ret = 0;
8134 goto out;
8135 }
8136 ret = _wait_for(COND, timeout_base_ms * 1000, 10);
8137 if (!ret)
8138 goto out;
8140 /*
8141 * The above can time out if the number of requests was low (2 in the
8142 * worst case) _and_ PCODE was busy for some reason even after a
8143 * (queued) request and @timeout_base_ms delay. As a workaround retry
8144 * the poll with preemption disabled to maximize the number of
8145 * requests. Increase the timeout from @timeout_base_ms to 50ms to
8146 * account for interrupts that could reduce the number of these
8147 * requests, and for any quirks of the PCODE firmware that delays
8148 * the request completion.
8149 */
8150 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
8151 WARN_ON_ONCE(timeout_base_ms > 3);
8152 preempt_disable();
8153 ret = wait_for_atomic(COND, 50);
8154 preempt_enable();
8156 out:
8157 return ret ? ret : status;
8158 #undef COND
8159 }
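/*
 * Illustrative use only (mailbox macros as defined in i915_reg.h, not a
 * call made in this file): preparing a SKL cdclk change might look like
 *
 *	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *
 * i.e. resend the request until the masked reply dword reads back as
 * "ready", with a 3 ms base timeout before the atomic retry kicks in.
 */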
8161 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
8162 {
8163 /*
8164 * N = val - 0xb7
8165 * Slow = Fast = GPLL ref * N
8166 */
8167 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
8168 }
8170 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
8171 {
8172 return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
8173 }
8175 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
8176 {
8177 /*
8178 * N = val / 2
8179 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
8180 */
8181 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
8182 }
8184 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
8185 {
8186 /* CHV needs even values */
8187 return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
8188 }
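/*
 * Worked example, assuming a GPLL reference of 19200 in gpll_ref_freq's
 * kHz-based units: on CHV, opcode 200 decodes to 19200 * 200 / 4000 =
 * 960 MHz, and chv_freq_opcode(dev_priv, 960) = 2 * 1000 * 960 / 19200,
 * rounded and doubled, gives 200 again. On VLV, opcode 0xc0 gives
 * 19200 * (0xc0 - 0xb7) / 1000 = 172.8, rounded to 173 MHz.
 */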
8190 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
8191 {
8192 if (IS_GEN9(dev_priv))
8193 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
8194 GEN9_FREQ_SCALER);
8195 else if (IS_CHERRYVIEW(dev_priv))
8196 return chv_gpu_freq(dev_priv, val);
8197 else if (IS_VALLEYVIEW(dev_priv))
8198 return byt_gpu_freq(dev_priv, val);
8199 else
8200 return val * GT_FREQUENCY_MULTIPLIER;
8201 }
8203 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
8204 {
8205 if (IS_GEN9(dev_priv))
8206 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
8207 GT_FREQUENCY_MULTIPLIER);
8208 else if (IS_CHERRYVIEW(dev_priv))
8209 return chv_freq_opcode(dev_priv, val);
8210 else if (IS_VALLEYVIEW(dev_priv))
8211 return byt_freq_opcode(dev_priv, val);
8212 else
8213 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
8214 }
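/*
 * Gen9 ratios tick in 50/3 MHz (16.667 MHz) steps, hence the
 * GT_FREQUENCY_MULTIPLIER (50) / GEN9_FREQ_SCALER (3) pair above: opcode
 * 18 decodes to 18 * 50 / 3 = 300 MHz, and intel_freq_opcode() maps
 * 300 MHz back to 300 * 3 / 50 = 18.
 */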
8216 struct request_boost {
8217 struct work_struct work;
8218 struct drm_i915_gem_request *req;
8219 };
8221 static void __intel_rps_boost_work(struct work_struct *work)
8222 {
8223 struct request_boost *boost = container_of(work, struct request_boost, work);
8224 struct drm_i915_gem_request *req = boost->req;
8226 if (!i915_gem_request_completed(req))
8227 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
8229 i915_gem_request_put(req);
8230 kfree(boost);
8231 }
8233 void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
8234 {
8235 struct request_boost *boost;
8237 if (req == NULL || INTEL_GEN(req->i915) < 6)
8238 return;
8240 if (i915_gem_request_completed(req))
8241 return;
8243 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
8244 if (boost == NULL)
8245 return;
8247 boost->req = i915_gem_request_get(req);
8249 INIT_WORK(&boost->work, __intel_rps_boost_work);
8250 queue_work(req->i915->wq, &boost->work);
8253 void intel_pm_setup(struct drm_device *dev)
8255 struct drm_i915_private *dev_priv = to_i915(dev);
8257 mutex_init(&dev_priv->rps.hw_lock);
8258 spin_lock_init(&dev_priv->rps.client_lock);
8260 INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
8261 __intel_autoenable_gt_powersave);
8262 INIT_LIST_HEAD(&dev_priv->rps.clients);
8264 dev_priv->pm.suspended = false;
8265 atomic_set(&dev_priv->pm.wakeref_count, 0);
8266 atomic_set(&dev_priv->pm.atomic_seq, 0);