// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
11 * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
12 * the buffer descriptor.
14 * @mmg: parent unified memory manager
15 * @handle: requested buffer handle
17 * Find the buffer in the store and return a pointer to its descriptor.
18 * Increase buffer refcount. If not found - return NULL.
20 struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
22 struct hl_mmap_mem_buf *buf;
24 spin_lock(&mmg->lock);
25 buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
27 spin_unlock(&mmg->lock);
28 dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle);
31 kref_get(&buf->refcount);
32 spin_unlock(&mmg->lock);
37 * hl_mmap_mem_buf_destroy - destroy the unused buffer
39 * @buf: memory manager buffer descriptor
41 * Internal function, used as a final step of buffer release. Shall be invoked
42 * only when the buffer is no longer in use (removed from idr). Will call the
43 * release callback (if applicable), and free the memory.
45 static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
47 if (buf->behavior->release)
48 buf->behavior->release(buf);
54 * hl_mmap_mem_buf_release - release buffer
56 * @kref: kref that reached 0.
58 * Internal function, used as a kref release callback, when the last user of
59 * the buffer is released. Shall be called from an interrupt context.
61 static void hl_mmap_mem_buf_release(struct kref *kref)
63 struct hl_mmap_mem_buf *buf =
64 container_of(kref, struct hl_mmap_mem_buf, refcount);
66 spin_lock(&buf->mmg->lock);
67 idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
68 spin_unlock(&buf->mmg->lock);
70 hl_mmap_mem_buf_destroy(buf);
74 * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
76 * @kref: kref that reached 0.
78 * Internal function, used for kref put by handle. Assumes mmg lock is taken.
79 * Will remove the buffer from idr, without destroying it.
81 static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
83 struct hl_mmap_mem_buf *buf =
84 container_of(kref, struct hl_mmap_mem_buf, refcount);
86 idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
90 * hl_mmap_mem_buf_put - decrease the reference to the buffer
92 * @buf: memory manager buffer descriptor
94 * Decrease the reference to the buffer, and release it if it was the last one.
95 * Shall be called from an interrupt context.
97 int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
99 return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
103 * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
106 * @mmg: parent unified memory manager
107 * @handle: requested buffer handle
109 * Decrease the reference to the buffer, and release it if it was the last one.
110 * Shall not be called from an interrupt context. Return -EINVAL if handle was
111 * not found, else return the put outcome (0 or 1).
113 int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
115 struct hl_mmap_mem_buf *buf;
117 spin_lock(&mmg->lock);
118 buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
120 spin_unlock(&mmg->lock);
122 "Buff put failed, no match to handle %#llx\n", handle);
126 if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
127 spin_unlock(&mmg->lock);
128 hl_mmap_mem_buf_destroy(buf);
132 spin_unlock(&mmg->lock);
137 * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
139 * @mmg: parent unified memory manager
140 * @behavior: behavior object describing this buffer polymorphic behavior
141 * @gfp: gfp flags to use for the memory allocations
142 * @args: additional args passed to behavior->alloc
144 * Allocate and register a new memory buffer inside the give memory manager.
145 * Return the pointer to the new buffer on success or NULL on failure.
147 struct hl_mmap_mem_buf *
148 hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
149 struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
152 struct hl_mmap_mem_buf *buf;
155 buf = kzalloc(sizeof(*buf), gfp);
159 spin_lock(&mmg->lock);
160 rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
161 spin_unlock(&mmg->lock);
164 "%s: Failed to allocate IDR for a new buffer, rc=%d\n",
165 behavior->topic, rc);
170 buf->behavior = behavior;
171 buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
172 kref_init(&buf->refcount);
174 rc = buf->behavior->alloc(buf, gfp, args);
176 dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
177 behavior->topic, rc);
184 spin_lock(&mmg->lock);
185 idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
186 spin_unlock(&mmg->lock);
193 * hl_mmap_mem_buf_vm_close - handle mmap close
195 * @vma: the vma object for which mmap was closed.
197 * Put the memory buffer if it is no longer mapped.
199 static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
201 struct hl_mmap_mem_buf *buf =
202 (struct hl_mmap_mem_buf *)vma->vm_private_data;
205 new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);
207 if (new_mmap_size > 0) {
208 buf->real_mapped_size = new_mmap_size;
212 atomic_set(&buf->mmap, 0);
213 hl_mmap_mem_buf_put(buf);
214 vma->vm_private_data = NULL;
217 static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
218 .close = hl_mmap_mem_buf_vm_close
222 * hl_mem_mgr_mmap - map the given buffer to the user
224 * @mmg: unified memory manager
225 * @vma: the vma object for which mmap was closed.
226 * @args: additional args passed to behavior->mmap
228 * Map the buffer specified by the vma->vm_pgoff to the given vma.
230 int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
233 struct hl_mmap_mem_buf *buf;
238 /* We use the page offset to hold the idr and thus we need to clear
239 * it before doing the mmap itself
241 handle = vma->vm_pgoff << PAGE_SHIFT;
244 /* Reference was taken here */
245 buf = hl_mmap_mem_buf_get(mmg, handle);
248 "Memory mmap failed, no match to handle %#llx\n", handle);
252 /* Validation check */
253 user_mem_size = vma->vm_end - vma->vm_start;
254 if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
256 "%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
257 buf->behavior->topic, user_mem_size, buf->mappable_size);
262 #ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
263 if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
266 if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
269 dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
270 buf->behavior->topic, vma->vm_start);
276 if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
278 "%s, Memory mmap failed, already mapped to user\n",
279 buf->behavior->topic);
284 vma->vm_ops = &hl_mmap_mem_buf_vm_ops;
286 /* Note: We're transferring the memory reference to vma->vm_private_data here. */
288 vma->vm_private_data = buf;
290 rc = buf->behavior->mmap(buf, vma, args);
292 atomic_set(&buf->mmap, 0);
296 buf->real_mapped_size = buf->mappable_size;
297 vma->vm_pgoff = handle >> PAGE_SHIFT;
302 hl_mmap_mem_buf_put(buf);
307 * hl_mem_mgr_init - initialize unified memory manager
309 * @dev: owner device pointer
310 * @mmg: structure to initialize
312 * Initialize an instance of unified memory manager
314 void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
317 spin_lock_init(&mmg->lock);
318 idr_init(&mmg->handles);
322 * hl_mem_mgr_fini - release unified memory manager
324 * @mmg: parent unified memory manager
326 * Release the unified memory manager. Shall be called from an interrupt context.
328 void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
330 struct hl_mmap_mem_buf *buf;
337 idr_for_each_entry(idp, buf, id) {
338 topic = buf->behavior->topic;
339 if (hl_mmap_mem_buf_put(buf) != 1)
341 "%s: Buff handle %u for CTX is still alive\n",
347 * hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
348 * @mmg: parent unified memory manager
350 * Destroy the memory manager IDR.
351 * Shall be called when IDR is empty and no memory buffers are in use.
353 void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg)
355 if (!idr_is_empty(&mmg->handles))
356 dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");
358 idr_destroy(&mmg->handles);