// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob,
					bool put_ctx)
{
	struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;

	if (put_hw_sob)
		hw_sob_put(handle->hw_sob);

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	if (put_ctx)
		hl_ctx_put(handle->ctx);

	kfree(handle);
}

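/*
 * The three kref release callbacks below cover the different tear-down paths:
 * they differ only in whether the HW SOB reference and/or the context
 * reference is dropped along with the handle itself.
 */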
void hl_encaps_release_handle_and_put_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, false, true);
}

static void hl_encaps_release_handle_and_put_sob(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, false);
}

void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, true);
}

static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{
	spin_lock_init(&mgr->lock);
	idr_init(&mgr->handles);
}

static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr)
{
	struct hl_cs_encaps_sig_handle *handle;
	struct idr *idp;
	u32 id;

	idp = &mgr->handles;

	/* The IDR is expected to be empty at this stage, because any leftover signal should have
	 * been released as part of CS roll-back.
	 */
	if (!idr_is_empty(idp)) {
		dev_warn(hdev->dev,
			"device released while some encaps signals handles are still allocated\n");
		idr_for_each_entry(idp, handle, id)
			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
	}

	idr_destroy(&mgr->handles);
}

static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/* Release all allocated HW block mapped list entries and destroy
	 * the mutex.
	 */
	hl_hw_block_mem_fini(ctx);

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS, we increment the ref count and for
	 * every CS that was finished we decrement it and we won't arrive
	 * to this function unless the ref count is 0.
	 */

	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context, asid=%u\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but the
		 * Coresight might still be working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 */
		if (hdev->in_debug)
			hl_device_set_debug_mode(hdev, ctx, false);

		hdev->asic_funcs->ctx_fini(ctx);

		hl_dec_ctx_fini(ctx);

		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
		hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
		mutex_destroy(&ctx->ts_reg_lock);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		hdev->asic_funcs->ctx_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_mmu_ctx_fini(ctx);
	}
}

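/*
 * hl_ctx_do_release() is the context's kref release callback: it runs only
 * when the last reference is dropped (e.g. via hl_ctx_put()), so it must not
 * be called directly.
 */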
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv) {
		struct hl_fpriv *hpriv = ctx->hpriv;

		mutex_lock(&hpriv->ctx_lock);
		hpriv->ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		hl_hpriv_put(hpriv);
	}

	kfree(ctx);
}

int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&ctx_mgr->lock);
	rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&ctx_mgr->lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->is_compute_ctx_active = true;

	return 0;

remove_from_idr:
	mutex_lock(&ctx_mgr->lock);
	idr_remove(&ctx_mgr->handles, ctx->handle);
	mutex_unlock(&ctx_mgr->lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}

int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	char task_comm[TASK_COMM_LEN];
	int rc = 0, i;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

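	/* The outcome store uses a fixed pool of nodes. Seeding all of them
	 * onto the free list here means no allocation is needed when an
	 * outcome is recorded later.
	 */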
	INIT_LIST_HEAD(&ctx->outcome_store.used_list);
	INIT_LIST_HEAD(&ctx->outcome_store.free_list);
	hash_init(ctx->outcome_store.outcome_map);
	for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
		list_add(&ctx->outcome_store.nodes_pool[i].list_link,
				&ctx->outcome_store.free_list);

	hl_hw_block_mem_init(ctx);

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_vm_ctx_fini;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		hl_encaps_sig_mgr_init(&ctx->sig_mgr);

		mutex_init(&ctx->ts_reg_lock);

		dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
			get_task_comm(task_comm, current), ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);
err_hw_block_mem_fini:
	hl_hw_block_mem_fini(ctx);
	kfree(ctx->cs_pending);

	return rc;
}

static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
{
	return kref_get_unless_zero(&ctx->refcount);
}

void hl_ctx_get(struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}

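/*
 * Reference-counting convention for the helpers above (usage sketch):
 *
 *	hl_ctx_get(ctx);
 *	... use ctx beyond the scope that produced it ...
 *	hl_ctx_put(ctx);
 *
 * The final put invokes hl_ctx_do_release() and frees the context.
 */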
struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
{
	struct hl_ctx *ctx = NULL;
	struct hl_fpriv *hpriv;

	mutex_lock(&hdev->fpriv_list_lock);

	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
		mutex_lock(&hpriv->ctx_lock);
		ctx = hpriv->ctx;
		if (ctx && !hl_ctx_get_unless_zero(ctx))
			ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		/* There can only be a single user which has opened the compute device, so exit
		 * immediately once we find its context or if we see that it has been released
		 */
		break;
	}

	mutex_unlock(&hdev->fpriv_list_lock);

	return ctx;
}

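/*
 * A context keeps the fences of its last max_pending_cs command submissions
 * in the cs_pending ring buffer, indexed by the low bits of the CS sequence
 * number. The helpers below look up fences in that ring.
 */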
/*
 * hl_ctx_get_fence_locked - get CS fence under CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 *         error pointer.
 *
 * NOTE: this function shall be called with cs_lock locked
 */
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	if (seq >= ctx->cs_sequence)
		return ERR_PTR(-EINVAL);

	/* Fences older than the last max_pending_cs submissions are gone */
	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
		return NULL;

	/* The masking relies on max_pending_cs being a power of 2 */
	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);

	return fence;
}

struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	fence = hl_ctx_get_fence_locked(ctx, seq);

	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_get_fences - get multiple CS fences under the same CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq_arr: array of CS sequences to wait for
 * @fence: fence array to store the CS fences
 * @arr_len: length of seq_arr and the fence array
 *
 * @return 0 on success, otherwise a non-zero error code
 */
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
				struct hl_fence **fence, u32 arr_len)
{
	struct hl_fence **fence_arr_base = fence;
	int i, rc = 0;

	spin_lock(&ctx->cs_lock);

	for (i = 0; i < arr_len; i++, fence++) {
		u64 seq = seq_arr[i];

		*fence = hl_ctx_get_fence_locked(ctx, seq);

		if (IS_ERR(*fence)) {
			dev_err(ctx->hdev->dev,
				"Failed to get fence for CS with seq 0x%llx\n",
				seq);
			rc = PTR_ERR(*fence);
			break;
		}
	}

	spin_unlock(&ctx->cs_lock);

	/* On failure, drop the references already taken on the first i fences */
	if (rc)
		hl_fences_put(fence_arr_base, i);

	return rc;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @ctx_mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
	mutex_init(&ctx_mgr->lock);
	idr_init(&ctx_mgr->handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @ctx_mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &ctx_mgr->handles;

	idr_for_each_entry(idp, ctx, id)
		kref_put(&ctx->refcount, hl_ctx_do_release);

	idr_destroy(&ctx_mgr->handles);
	mutex_destroy(&ctx_mgr->lock);
}