// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sideband.h"

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;
	u8 t_bl;
};

static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
				     &val, &val2);
	if (ret)
		return ret;

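	/*
	 * Decode the QGV point info as packed by pcode:
	 * val:  DCLK in bits 15:0, tRP in 23:16, tRCD in 31:24
	 * val2: tRDPRE in bits 7:0, tRAS in 15:8
	 */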
	sp->dclk = val & 0xffff;
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
				  u32 points_mask)
{
	int ret;

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_POINTS_RESTRICTED_MASK,
				ICL_PCODE_POINTS_RESTRICTED,
				1);

	if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	const struct dram_info *dram_info = &dev_priv->dram_info;
	int i, ret;

	qi->num_points = dram_info->num_qgv_points;

	if (IS_GEN(dev_priv, 12))
		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;

	if (drm_WARN_ON(&dev_priv->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		drm_dbg_kms(&dev_priv->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	return 0;
}

static int icl_calc_bw(int dclk, int num, int den)
{
	/* multiples of 16.666MHz (100/6) */
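	/*
	 * Illustrative example: dclk == 72 is 72 * 16.666 MHz ≈ 1200 MHz,
	 * so icl_calc_bw(72, 16, 1) == 16 * 72 * 100 / 6 == 19200, i.e.
	 * 19.2 GB/s for a 16 byte access width, expressed in MB/s.
	 */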
	return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}

static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}

struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
};

static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = dev_priv->dram_info.num_channels;
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
	int maxdebw;
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);

	ipqdepthpch = 16;

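	/*
	 * sa->deprogbwlimit is in GB/s, so scale it by 1000 to match the
	 * MB/s returned by icl_calc_bw(), and cap the derated bandwidth at
	 * 60% of the peak 16 byte wide bandwidth.
	 */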
	maxdebw = min(sa->deprogbwlimit * 1000,
		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		struct intel_bw_info *bi = &dev_priv->max_bw[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
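			/*
			 * Peak bandwidth: clpchgroup 32 byte blocks on each
			 * channel transferred every ct dclk cycles (a sketch
			 * of the apparent intent; see the FIXME above).
			 */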
			bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * 9 / 10); /* 90% */

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}

		if (bi->num_planes == 1)
			break;
	}

	/*
	 * If SAGV is disabled in the BIOS, we always get just one QGV
	 * point; there is no point sending PCode commands to restrict it
	 * as they would fail anyway.
	 */
	if (qi.num_points == 1)
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
	else
		dev_priv->sagv_status = I915_SAGV_ENABLED;

	return 0;
}

static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			       int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
	}

	return 0;
}

void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

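	/* RKL is display gen 12, so it must be checked before the IS_GEN() tests */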
	if (IS_ROCKETLAKE(dev_priv))
		icl_get_bw_info(dev_priv, &rkl_sa_info);
	else if (IS_GEN(dev_priv, 12))
		icl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (IS_GEN(dev_priv, 11))
		icl_get_bw_info(dev_priv, &icl_sa_info);
}

static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
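	/* hweight8() counts the set bits, i.e. the enabled non-cursor planes */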
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}

static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];
	}

	return data_rate;
}

void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}

static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	return data_rate;
}

struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int max_bw = 0;
	int slice_id;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		enum plane_id plane_id;
		struct intel_dbuf_bw *crtc_bw;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];

		memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));

		if (!crtc_state->hw.active)
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_ddb_entry *plane_alloc =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			const struct skl_ddb_entry *uv_plane_alloc =
				&crtc_state->wm.skl.plane_ddb_uv[plane_id];
			unsigned int data_rate = crtc_state->data_rate[plane_id];
			unsigned int dbuf_mask = 0;

			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);

			/*
			 * FIXME: To calculate this more properly we probably
			 * need to split the per-plane data_rate into
			 * data_rate_y and data_rate_uv for multi-planar
			 * formats, so that it doesn't get counted twice if
			 * the two color planes reside on different slices.
			 * However, for pre-icl this works anyway because we
			 * have only a single slice, and for icl+ the uv
			 * plane has a non-zero data rate.
			 * So in the worst case these calculations are a bit
			 * pessimistic, which shouldn't pose any significant
			 * problem anyway.
			 */
			for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
				crtc_bw->used_bw[slice_id] += data_rate;
		}
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_dbuf_bw *crtc_bw;

		crtc_bw = &new_bw_state->dbuf_bw[pipe];

		for_each_dbuf_slice(slice_id) {
			/*
			 * Current experimental observations show that,
			 * contrary to BSpec, we get underruns once the total
			 * bandwidth across all slices exceeds 64 * CDCLK.
			 * As a temporary measure, so as not to keep CDCLK
			 * bumped up all the time, derive the minimum CDCLK
			 * from the overall bandwidth consumed by the slices.
			 */
			max_bw += crtc_bw->used_bw[slice_id];
		}
	}

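	/* the lowest CDCLK that still satisfies 64 * CDCLK >= max_bw */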
	new_bw_state->min_cdclk = max_bw / 64;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int min_cdclk = 0;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_new_cdclk_state(state);
		if (!cdclk_state)
			return 0;

		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
	}

	new_bw_state->min_cdclk = min_cdclk;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	unsigned int data_rate;
	unsigned int num_active_planes;
	struct intel_crtc *crtc;
	int i, ret;
	u32 allowed_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
	u32 mask = (1 << num_qgv_points) - 1;

	/* FIXME earlier gens need some checks too */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		drm_dbg_kms(&dev_priv->drm,
			    "pipe %c data rate %u num active planes %u\n",
			    pipe_name(crtc->pipe),
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	if (!new_bw_state)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

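	/*
	 * Note: data_rate should be in kB/s here (cpp * pixel rate in kHz),
	 * so dividing by 1000 yields the MB/s units used by the derated
	 * bandwidth values.
	 */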
	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By this point we already
		 * have those, as they are calculated earlier
		 * in intel_atomic_check().
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			allowed_points |= BIT(i);
		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we should always have at least one allowed
	 * point left, so if we don't, simply reject the configuration for
	 * obvious reasons.
	 */
	if (allowed_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration (%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * If we can't enable SAGV due to the increased memory latency it
	 * may cause, leave only the single QGV point with the highest
	 * bandwidth.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		allowed_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}
	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = ~allowed_points & mask;
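	/*
	 * e.g. with three QGV points (mask == 0b111), allowed_points == 0b101
	 * yields qgv_points_mask == 0b010, restricting only the middle point.
	 */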

	old_bw_state = intel_atomic_get_old_bw_state(state);
	/*
	 * If the actual mask has changed we need to make sure that
	 * the commits are serialized (in case this is a nomodeset,
	 * nonblocking commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

int intel_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
				     &state->base, &intel_bw_funcs);

	return 0;
}