// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "debugfs_gt.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"
#include "shmem_utils.h"

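/*
 * Early, software-only initialisation of the GT. Only locks, lists and the
 * early state of the GT sub-components (buffer pool, reset, requests,
 * timelines, pm, rps and uc) are set up here; no hardware is touched until
 * intel_gt_init_hw().
 */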
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	spin_lock_init(&gt->irq_lock);

	mutex_init(&gt->tlb_invalidate_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_pm_init_early(gt);

	intel_rps_init_early(&gt->rps);
	intel_uc_init_early(&gt->uc);
}

void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
	gt->ggtt = ggtt;
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);

	return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (IS_GEN(i915, 2)) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (IS_GEN(i915, 3)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

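/*
 * One-stop shop for (re)initialising the GT hardware, shared by first-time
 * init and the resume path. All register writes below run under an explicit
 * FORCEWAKE_ALL grab, and a failure from PPGTT or uC setup is propagated to
 * the caller.
 */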
int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

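/*
 * Clear any stale error state: the master error registers (PGTBL_ER, EIR,
 * IPEIR) and the per-generation ring fault registers. Bits in EIR that
 * refuse to clear are masked via EMR instead, so they can no longer raise
 * error interrupts.
 */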
static void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}

static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
				"\tAddr: 0x%08lx\n"
				"\tAddress space: %s\n"
				"\tSource ID: %d\n"
				"\tType: %d\n",
				fault & PAGE_MASK,
				fault & RING_FAULT_GTTSEL_MASK ?
				"GGTT" : "PPGTT",
				RING_FAULT_SRCID(fault),
				RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (INTEL_GEN(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
			"\tAddr: 0x%08x_%08x\n"
			"\tAddress space: %s\n"
			"\tEngine ID: %d\n"
			"\tSource ID: %d\n"
			"\tType: %d\n",
			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
			GEN8_RING_FAULT_ENGINE_ID(fault),
			RING_FAULT_SRCID(fault),
			RING_FAULT_FAULT_TYPE(fault));
	}
}

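/*
 * Report (as debug messages) and clear any faults left behind by a previous
 * owner of the hardware, so that our own fault handling starts from a clean
 * slate.
 */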
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (INTEL_GEN(i915) >= 8)
		gen8_check_faults(gt);
	else if (INTEL_GEN(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was removed from the interface.
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (INTEL_GEN(gt->i915) < 6)
		intel_gtt_chipset_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_rps_driver_register(&gt->rps);

	debugfs_gt_register(gt);
}

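/*
 * Allocate the scratch page used as a safe target for stray GPU accesses.
 * Stolen memory is preferred, with an internal object as fallback; the vma
 * is pinned high in the GGTT and made unshrinkable for the GT's lifetime.
 */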
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

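/*
 * Select the address space for the kernel context: a full PPGTT where the
 * platform supports more than aliasing PPGTT, otherwise a reference to the
 * global GTT.
 */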
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

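/*
 * Re-verify the engine workarounds immediately after load. This is a
 * CONFIG_DRM_I915_DEBUG_GEM-only sanity check; any lost workaround is
 * reported as -EIO.
 */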
static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}

static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

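/*
 * Full one-time GT bring-up for driver load: scratch setup, power
 * management, the kernel address space, engines and uC firmware, followed
 * by a first resume and recording of the default context image. On error
 * the GT is wedged so that teardown finds the engines idle.
 */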
int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	intel_gt_init_clock_frequency(gt);

	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_rps_driver_unregister(&gt->rps);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged(gt);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
}

void intel_gt_driver_late_release(struct intel_gt *gt)
{
	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	intel_uc_driver_late_release(&gt->uc);
	intel_gt_fini_requests(gt);
	intel_gt_fini_reset(gt);
	intel_gt_fini_timelines(gt);
	intel_engines_free(gt);
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

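/*
 * TLB invalidation is triggered through per-engine-class control registers.
 * This helper looks up the register for a given engine and the trigger
 * value to write and poll; gen8 VCS engines use consecutive registers
 * (GEN8_M1TCR, GEN8_M2TCR) selected by engine instance.
 */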
struct reg_and_bit {
	i915_reg_t reg;
	u32 bit;
};

static struct reg_and_bit
get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
		const i915_reg_t *regs, const unsigned int num)
{
	const unsigned int class = engine->class;
	struct reg_and_bit rb = { };

	if (drm_WARN_ON_ONCE(&engine->i915->drm,
			     class >= num || !regs[class].reg))
		return rb;

	rb.reg = regs[class];
	if (gen8 && class == VIDEO_DECODE_CLASS)
		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */

	rb.bit = engine->instance;
	if (INTEL_GEN(engine->i915) < 11)
		rb.bit = BIT(rb.bit);

	return rb;
}

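/*
 * intel_gt_invalidate_tlbs - flush the TLBs of all engines, e.g. before the
 * backing store of an object is released. The caller must hold a runtime-pm
 * wakeref (asserted below). Invalidation requests for all engines are
 * written under uncore->lock to serialise against GT reset, and completion
 * is then polled for each engine with the lock dropped.
 */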
void intel_gt_invalidate_tlbs(struct intel_gt *gt)
{
	static const i915_reg_t gen8_regs[] = {
		[RENDER_CLASS]			= GEN8_RTCR,
		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
	};
	static const i915_reg_t gen12_regs[] = {
		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
	};
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	const i915_reg_t *regs;
	unsigned int num = 0;

	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
		return;

	if (INTEL_GEN(i915) == 12) {
		regs = gen12_regs;
		num = ARRAY_SIZE(gen12_regs);
	} else if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) <= 11) {
		regs = gen8_regs;
		num = ARRAY_SIZE(gen8_regs);
	} else if (INTEL_GEN(i915) < 8) {
		return;
	}

	if (drm_WARN_ONCE(&i915->drm, !num,
			  "Platform does not implement TLB invalidation!"))
		return;

	GEM_TRACE("\n");

	assert_rpm_wakelock_held(&i915->runtime_pm);

	mutex_lock(&gt->tlb_invalidate_lock);
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */

	for_each_engine(engine, gt, id) {
		struct reg_and_bit rb;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (!i915_mmio_reg_offset(rb.reg))
			continue;

		if (INTEL_GEN(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
		    engine->class == VIDEO_ENHANCEMENT_CLASS))
			rb.bit = _MASKED_BIT_ENABLE(rb.bit);

		intel_uncore_write_fw(uncore, rb.reg, rb.bit);
	}

	spin_unlock_irq(&uncore->lock);

	for_each_engine(engine, gt, id) {
		/*
		 * HW architecture suggest typical invalidation time at 40us,
		 * with pessimistic cases up to 100us and a recommendation to
		 * cap at 1ms. We go a bit higher just in case.
		 */
		const unsigned int timeout_us = 100;
		const unsigned int timeout_ms = 4;
		struct reg_and_bit rb;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (!i915_mmio_reg_offset(rb.reg))
			continue;

		if (__intel_wait_for_register_fw(uncore,
						 rb.reg, rb.bit, 0,
						 timeout_us, timeout_ms,
						 NULL))
			drm_err_ratelimited(&gt->i915->drm,
					    "%s TLB invalidation did not complete in %ums!\n",
					    engine->name, timeout_ms);
	}

	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
	mutex_unlock(&gt->tlb_invalidate_lock);
}