// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
12 static void hl_ctx_fini(struct hl_ctx *ctx)
14 struct hl_device *hdev = ctx->hdev;
19 * If we arrived here, there are no jobs waiting for this context
20 * on its queues so we can safely remove it.
21 * This is because for each CS, we increment the ref count and for
22 * every CS that was finished we decrement it and we won't arrive
23 * to this function unless the ref count is 0
26 for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
27 hl_fence_put(ctx->cs_pending[i]);
29 kfree(ctx->cs_pending);
31 if (ctx->asid != HL_KERNEL_ASID_ID) {
32 dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);
34 /* The engines are stopped as there is no executing CS, but the
35 * Coresight might be still working by accessing addresses
36 * related to the stopped engines. Hence stop it explicitly.
37 * Stop only if this is the compute context, as there can be
38 * only one compute context
40 if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
41 hl_device_set_debug_mode(hdev, false);
43 hl_cb_va_pool_fini(ctx);
45 hl_asid_free(hdev, ctx->asid);
47 if ((!hdev->pldm) && (hdev->pdev) &&
48 (!hdev->asic_funcs->is_device_idle(hdev,
51 "device not idle after user context is closed (0x%llx)\n",
54 dev_dbg(hdev->dev, "closing kernel context\n");
59 void hl_ctx_do_release(struct kref *ref)
63 ctx = container_of(ref, struct hl_ctx, refcount);
68 hl_hpriv_put(ctx->hpriv);
73 int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
75 struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
79 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
85 mutex_lock(&mgr->ctx_lock);
86 rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
87 mutex_unlock(&mgr->ctx_lock);
90 dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
96 rc = hl_ctx_init(hdev, ctx, false);
103 /* TODO: remove for multiple contexts per process */
106 /* TODO: remove the following line for multiple process support */
107 hdev->compute_ctx = ctx;
112 mutex_lock(&mgr->ctx_lock);
113 idr_remove(&mgr->ctx_handles, ctx->handle);
114 mutex_unlock(&mgr->ctx_lock);
121 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
123 if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
127 "user process released device but its command submissions are still executing\n");
130 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
136 kref_init(&ctx->refcount);
138 ctx->cs_sequence = 1;
139 spin_lock_init(&ctx->cs_lock);
140 atomic_set(&ctx->thread_ctx_switch_token, 1);
141 ctx->thread_ctx_switch_wait_token = 0;
142 ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
143 sizeof(struct hl_fence *),
145 if (!ctx->cs_pending)
149 ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
150 rc = hl_mmu_ctx_init(ctx);
152 dev_err(hdev->dev, "Failed to init mmu ctx module\n");
153 goto err_free_cs_pending;
156 ctx->asid = hl_asid_alloc(hdev);
158 dev_err(hdev->dev, "No free ASID, failed to create context\n");
160 goto err_free_cs_pending;
163 rc = hl_vm_ctx_init(ctx);
165 dev_err(hdev->dev, "Failed to init mem ctx module\n");
170 rc = hl_cb_va_pool_init(ctx);
173 "Failed to init VA pool for mapped CB\n");
174 goto err_vm_ctx_fini;
177 rc = hdev->asic_funcs->ctx_init(ctx);
179 dev_err(hdev->dev, "ctx_init failed\n");
180 goto err_cb_va_pool_fini;
183 dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
189 hl_cb_va_pool_fini(ctx);
193 hl_asid_free(hdev, ctx->asid);
195 kfree(ctx->cs_pending);
200 void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
202 kref_get(&ctx->refcount);
205 int hl_ctx_put(struct hl_ctx *ctx)
207 return kref_put(&ctx->refcount, hl_ctx_do_release);
210 struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
212 struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
213 struct hl_fence *fence;
215 spin_lock(&ctx->cs_lock);
217 if (seq >= ctx->cs_sequence) {
218 spin_unlock(&ctx->cs_lock);
219 return ERR_PTR(-EINVAL);
222 if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
223 spin_unlock(&ctx->cs_lock);
227 fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
230 spin_unlock(&ctx->cs_lock);
236 * hl_ctx_mgr_init - initialize the context manager
238 * @mgr: pointer to context manager structure
240 * This manager is an object inside the hpriv object of the user process.
241 * The function is called when a user process opens the FD.
243 void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
245 mutex_init(&mgr->ctx_lock);
246 idr_init(&mgr->ctx_handles);
250 * hl_ctx_mgr_fini - finalize the context manager
252 * @hdev: pointer to device structure
253 * @mgr: pointer to context manager structure
255 * This function goes over all the contexts in the manager and frees them.
256 * It is called when a process closes the FD.
258 void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
264 idp = &mgr->ctx_handles;
266 idr_for_each_entry(idp, ctx, id)
267 hl_ctx_free(hdev, ctx);
269 idr_destroy(&mgr->ctx_handles);
270 mutex_destroy(&mgr->ctx_lock);