/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;
static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
static void rcu_context_free(struct rcu_head *rcu)
{
	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

	kmem_cache_free(global.slab_ce, ce);
}
void intel_context_free(struct intel_context *ce)
{
	call_rcu(&ce->rcu, rcu_context_free);
}
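
/*
 * Illustrative lifecycle sketch (not taken from this file): a typical caller
 * creates a context against an engine, pins it around request construction
 * and drops its reference when done, e.g.
 *
 *	ce = intel_context_create(engine);
 *	err = intel_context_pin(ce);
 *	rq = i915_request_create(ce);
 *	...
 *	intel_context_unpin(ce);
 *	intel_context_put(ce);
 *
 * Only the software state is allocated here; the HW image is allocated
 * lazily on first pin via intel_context_alloc_state().
 */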
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}
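
/*
 * Lazily allocate the backend state for this context (via ce->ops->alloc)
 * the first time it is needed, serialised against concurrent pinners by
 * ce->pin_mutex. CONTEXT_ALLOC_BIT records that allocation has completed.
 */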
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}
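
/*
 * Activity tracking: take a reference on ce->active and, for ordinary
 * contexts, preallocate the barrier nodes used to track engine idling.
 * Barrier contexts themselves skip the preallocation.
 */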
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}
static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}
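
/*
 * Pin the context state vma into the GGTT above the backend's pin bias,
 * keep its activity tracker acquired, and hide the object from the
 * shrinker for as long as the context state remains pinned.
 */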
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err) {
		i915_vma_unpin(vma);
		return err;
	}

	/*
	 * Mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;
}
static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}
static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		intel_ring_unpin(ring);

	return err;
}
static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}
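
/*
 * Acquire (pin + activity reference) the ring, the timeline and, if
 * present, the context state before committing to the pin proper. Any
 * failure unwinds the acquisitions already made.
 */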
static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}
static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}
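
/*
 * Pin the context for execution under an existing ww acquire context. All
 * backing objects are locked up front so that the pin itself, taken under
 * ce->pin_mutex, never has to acquire dma_resv locks; on the first pin the
 * backend's pre_pin/pin hooks set up and consume the mapped state (vaddr).
 */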
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_ctx_unpin;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_release;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_post_unpin;

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_release:
	i915_active_release(&ce->active);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we could unlock all the global state locked above,
	 * since it is pinned and doesn't need fencing, and will thus remain
	 * resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}
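
/*
 * Convenience wrapper that supplies its own ww acquire context and retries
 * the pin whenever -EDEADLK is reported, backing off first.
 */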
int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}
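
/*
 * Drop a pin reference; the final unpin releases the backend mapping and
 * leaves the ring/timeline/state pins to be dropped by the i915_active
 * retirement callback once the context has idled.
 */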
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that it is not freed before we
	 * finish dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}
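
/*
 * i915_active callbacks: __intel_context_active() takes extra references
 * on the ring/timeline/state while the context has requests in flight and
 * __intel_context_retire() drops them, together with the context
 * reference, once all outstanding requests have completed.
 */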
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}
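
/*
 * One-time software initialisation of a context against its engine. No HW
 * state is allocated here; that is deferred to intel_context_alloc_state()
 * on first pin.
 */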
void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	/* NB ce->signal_link/lock is used under RCU */
	spin_lock_init(&ce->signal_lock);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}
void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}
static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}
static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };
int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}
void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}
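
/*
 * Order a request that remotely modifies this context (i.e. one submitted
 * on a different context/timeline) after the target's current activity,
 * and keep the target's image pinned until that request is retired.
 */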
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by making the request an
	 * entry in the ce activity tracker.
	 *
	 * We only need to take one pin on its account; in other words,
	 * transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}
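
/*
 * Allocate a request on this context, pinning it only for the duration of
 * request construction. The ww retry loop mirrors __intel_context_do_pin().
 */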
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as the outer
	 * lock here. Hack around this to shut up lockdep in selftests.
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif