// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define CB_VA_POOL_SIZE		(4UL * SZ_1G)

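/*
 * Each context that supports CB mapping reserves a CB_VA_POOL_SIZE block of
 * host device-virtual addresses and manages it with a gen pool (see
 * hl_cb_va_pool_init() below); cb_map_mem() carves per-CB ranges out of it.
 */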
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 page_size = prop->pmmu.page_size;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Mapping a CB to the device's MMU is not supported\n");
		return -EPERM;
	}

	if (cb->is_mmu_mapped)
		return 0;

	cb->roundup_size = roundup(cb->size, page_size);

	cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
	if (!cb->virtual_addr) {
		dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
		return -ENOMEM;
	}

	mutex_lock(&hdev->mmu_lock);

	rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
	if (rc) {
		dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
		goto err_va_pool_free;
	}

	rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
	if (rc)
		goto err_mmu_unmap;

	mutex_unlock(&hdev->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_mmu_unmap:
	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
err_va_pool_free:
	mutex_unlock(&hdev->mmu_lock);
	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);

	return rc;
}

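/*
 * Teardown counterpart of cb_map_mem(): unmap the CB and invalidate the MMU
 * cache under mmu_lock, then return the device VA range to the context's pool.
 */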
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;

	mutex_lock(&hdev->mmu_lock);
	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
	hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
	mutex_unlock(&hdev->mmu_lock);

	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
}

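/*
 * Release the CB's backing storage: internal CBs go back to the device's
 * internal gen pool, all others are freed through the DMA API.
 */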
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);

	kfree(cb);
}

static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		/* Recycle pool CBs and allow their handle to be destroyed again */
		atomic_set(&cb->is_handle_destroyed, 0);
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

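/*
 * Allocate a CB object and its backing storage. Internal CBs come from the
 * device's internal gen pool, kernel-context CBs from coherent DMA memory
 * (atomic first, see the comment below), and user CBs from zeroed coherent
 * DMA memory.
 */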
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

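/*
 * Allocation parameters bundled into a single struct, since the memory
 * manager's alloc callback (hl_cb_mmap_mem_alloc()) receives only one opaque
 * args pointer.
 */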
struct hl_cb_mmap_mem_alloc_args {
	struct hl_device *hdev;
	struct hl_ctx *ctx;
	u32 cb_size;
	bool internal_cb;
	bool map_cb;
};

static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf)
{
	struct hl_cb *cb = buf->private;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(cb->hdev, cb);
}

static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	struct hl_cb_mmap_mem_alloc_args *cb_args = args;
	struct hl_cb *cb;
	int rc, ctx_id = cb_args->ctx->asid;
	bool alloc_new_cb = true;

	if (!cb_args->internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_args->cb_size < PAGE_SIZE)
			cb_args->cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&cb_args->hdev->cb_pool_lock);
			if (!list_empty(&cb_args->hdev->cb_pool)) {
				cb = list_first_entry(&cb_args->hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&cb_args->hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&cb_args->hdev->cb_pool_lock);
				dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
		if (!cb)
			return -ENOMEM;
	}

	cb->hdev = cb_args->hdev;
	cb->ctx = cb_args->ctx;
	cb->buf = buf;
	cb->buf->mappable_size = cb->size;
	cb->buf->private = cb;

	hl_ctx_get(cb->ctx);

	if (cb_args->map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(cb_args->hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(cb_args->ctx, cb);
		if (rc)
			goto release_cb;
	}

	hl_debugfs_add_cb(cb);

	return 0;

release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(cb_args->hdev, cb);

	return rc;
}

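/* mmap of a CB is delegated to the ASIC-specific implementation. */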
static int hl_cb_mmap(struct hl_mmap_mem_buf *buf,
				struct vm_area_struct *vma, void *args)
{
	struct hl_cb *cb = buf->private;

	return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
}

static struct hl_mmap_mem_buf_behavior cb_behavior = {
	.topic = "CB",
	.mem_id = HL_MMAP_TYPE_CB,
	.alloc = hl_cb_mmap_mem_alloc,
	.release = hl_cb_mmap_mem_release,
	.mmap = hl_cb_mmap,
};

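/*
 * Create a CB and return an opaque handle to it. The actual allocation and
 * optional MMU mapping happen in hl_cb_mmap_mem_alloc() via the memory
 * manager; only device-state and size checks are done here.
 */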
int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb_mmap_mem_alloc_args args = {
		.hdev = hdev,
		.ctx = ctx,
		.cb_size = cb_size,
		.internal_cb = internal_cb,
		.map_cb = map_cb,
	};
	struct hl_mmap_mem_buf *buf;
	int ctx_id = ctx->asid;

	if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		return -EBUSY;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		return -EINVAL;
	}

	buf = hl_mmap_mem_buf_alloc(
		mmg, &cb_behavior,
		ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL, &args);
	if (!buf)
		return -ENOMEM;

	*handle = buf->handle;

	return 0;
}

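/*
 * Destroy the handle-to-CB association. The buffer itself is refcounted and is
 * only released once its last user drops its reference, so destroying a CB
 * that is still in use is legal and merely logged.
 */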
int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
	struct hl_cb *cb;
	int rc;

	cb = hl_cb_get(mmg, cb_handle);
	if (!cb) {
		dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
			cb_handle);
		return -EINVAL;
	}

	/* Make sure that CB handle isn't destroyed more than once */
	rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
	hl_cb_put(cb);
	if (rc) {
		dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
			cb_handle);
		return -EINVAL;
	}

	rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
	if (rc < 0)
		return rc; /* Invalid handle */

	if (rc == 0)
		dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle);

	return 0;
}

static int hl_cb_info(struct hl_mem_mgr *mmg,
			u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
	struct hl_cb *cb;
	int rc = 0;

	cb = hl_cb_get(mmg, handle);
	if (!cb) {
		dev_err(mmg->dev,
			"CB info failed, no match to handle 0x%llx\n", handle);
		return -EINVAL;
	}

	if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
		if (cb->is_mmu_mapped) {
			*device_va = cb->virtual_addr;
		} else {
			dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		*usage_cnt = atomic_read(&cb->cs_cnt);
	}

out:
	hl_cb_put(cb);
	return rc;
}

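/*
 * Entry point for the CB IOCTL: dispatches CREATE/DESTROY/INFO requests from
 * user-space to the helpers above.
 */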
int hl_cb_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
	struct hl_fpriv *hpriv = file_priv->driver_priv;
	struct hl_device *hdev = hpriv->hdev;
	union hl_cb_args *args = data;
	u64 handle = 0, device_va = 0;
	enum hl_device_status status;
	u32 usage_cnt = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(&hpriv->mem_mgr,
					args->in.cb_handle);
		break;

	case HL_CB_OP_INFO:
		rc = hl_cb_info(&hpriv->mem_mgr, args->in.cb_handle,
				args->in.flags,
				&usage_cnt,
				&device_va);
		if (rc)
			break;

		memset(&args->out, 0, sizeof(args->out));

		if (args->in.flags & HL_CB_FLAGS_GET_DEVICE_VA)
			args->out.device_va = device_va;
		else
			args->out.usage_cnt = usage_cnt;
		break;

	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

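/*
 * hl_cb_get()/hl_cb_put() translate between handles and CB objects on top of
 * the memory manager's buffer refcounting; every successful get must be
 * balanced by a put.
 */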
struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	buf = hl_mmap_mem_buf_get(mmg, handle);
	if (!buf)
		return NULL;

	return buf->private;
}

void hl_cb_put(struct hl_cb *cb)
{
	hl_mmap_mem_buf_put(cb->buf);
}

struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
	/* hl_cb_get should never fail here */
	if (!cb) {
		dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				(u32) cb_handle);
		goto destroy_cb;
	}

	return cb;

destroy_cb:
	hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);

	return NULL;
}

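/*
 * Illustrative sketch (not taken from a real caller) of the
 * hl_cb_kernel_create() lifecycle; the handle stays reachable through
 * cb->buf->handle, which hl_cb_mmap_mem_alloc() sets up:
 *
 *	struct hl_cb *cb = hl_cb_kernel_create(hdev, SZ_4K, false);
 *
 *	if (!cb)
 *		return -ENOMEM;
 *	...fill cb->kernel_address with packets and submit them...
 *	hl_cb_put(cb);
 *	hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
 */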
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

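/*
 * Per-context device VA pool used by cb_map_mem(): a CB_VA_POOL_SIZE block of
 * the host VA range is reserved once per context and handed to a gen pool
 * whose allocation granularity is the PMMU page size.
 */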
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
					CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
	if (!ctx->cb_va_pool_base) {
		rc = -ENOMEM;
		goto err_pool_destroy;
	}
	rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_unreserve_va_block;
	}

	return 0;

err_unreserve_va_block:
	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
}