// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/*
 * Cmdstream submission:
 */
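
/*
 * Overview of the flow below, as driven by msm_ioctl_gem_submit():
 *
 *  1) copy in the bo and cmd tables, resolve GEM handles (submit_lookup_*)
 *  2) lock all bo's via their dma_resv, ww-acquire style (submit_lock_objects)
 *  3) record implicit-sync dependencies (submit_fence_sync)
 *  4) pin backing pages and iovas (submit_pin_objects)
 *  5) patch relocs where userspace's presumed iovas were stale (submit_reloc)
 *  6) arm the scheduler job, publish fences, and push the job
 */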

static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

	sz = struct_size(submit, bos, nr_bos) +
			((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);
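
	/* The allocation size is under userspace control; fail fast and
	 * quietly (no OOM-killer, no allocation-failure warning) rather
	 * than thrash on an unreasonable request:
	 */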
	submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	ret = drm_sched_job_init(&submit->base, queue->entity, queue);
	if (ret) {
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->aspace = queue->ctx->aspace;
	submit->gpu = gpu;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}

void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	if (submit->fence_id) {
		mutex_lock(&submit->queue->lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		mutex_unlock(&submit->queue->lock);
	}

	dma_fence_put(submit->user_fence);
	dma_fence_put(submit->hw_fence);

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}

static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in submit_pin_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;
	}
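
	/* Second pass: resolve handles to objects, all under a single
	 * table_lock.  The user copies above must happen first, since
	 * copy_from_user() can fault and must not run under a spinlock:
	 */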
	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = to_msm_bo(obj);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

static int submit_lookup_cmds(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	size_t sz;
	int ret = 0;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			return -EINVAL;
		}

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
		submit->cmd[i].idx = submit_cmd.submit_idx;
		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

		userptr = u64_to_user_ptr(submit_cmd.relocs);
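
		/* Copy the reloc table in now, while faulting on user
		 * memory is still allowed; the relocs are consumed later,
		 * once the bo's are locked and copy_from_user() is not:
		 */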
		sz = array_size(submit_cmd.nr_relocs,
				sizeof(struct drm_msm_gem_submit_reloc));
		/* check for overflow: */
		if (sz == SIZE_MAX) {
			ret = -ENOMEM;
			goto out;
		}
		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
		if (!submit->cmd[i].relocs) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	return ret;
}

/* Unwind bo state, according to cleanup_flags.  In the success case, only
 * the lock is dropped at the end of the submit (and active/pin ref is dropped
 * later when the submit is retired).
 */
static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
		unsigned cleanup_flags)
{
	struct drm_gem_object *obj = &submit->bos[i].obj->base;
	unsigned flags = submit->bos[i].flags & cleanup_flags;

	/*
	 * Clear flags bit before dropping lock, so that the msm_job_run()
	 * path isn't racing with submit_cleanup() (ie. the read/modify/
	 * write is protected by the obj lock in all paths)
	 */
	submit->bos[i].flags &= ~cleanup_flags;

	if (flags & BO_VMA_PINNED)
		msm_gem_unpin_vma(submit->bos[i].vma);

	if (flags & BO_OBJ_PINNED)
		msm_gem_unpin_locked(obj);

	if (flags & BO_ACTIVE)
		msm_gem_active_put(obj);

	if (flags & BO_LOCKED)
		dma_resv_unlock(obj->resv);
}

static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
				 BO_ACTIVE | BO_LOCKED;
	submit_cleanup_bo(submit, i, cleanup_flags);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;
}

/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;
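
	/*
	 * Standard ww_mutex acquire protocol: on contention (-EDEADLK) we
	 * drop every lock already held, sleep-lock the contended object
	 * under the same ticket, and retry the whole loop:
	 */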
retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(msm_obj->base.resv,
							  &submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	if (ret == -EALREADY) {
		DRM_ERROR("handle %u at index %u already on submit list\n",
				submit->bos[i].handle, i);
		ret = -EINVAL;
	}

	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
						       &submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}

		/* Not expecting -EALREADY here; if the bo was already
		 * locked, we should have gotten -EALREADY from the
		 * dma_resv_lock_interruptible() call above.
		 */
		WARN_ON_ONCE(ret == -EALREADY);
	}

	return ret;
}

static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* NOTE: _reserve_shared() must happen before
		 * _add_shared_fence(), which makes this a slightly
		 * strange place to call it.  OTOH this is a
		 * convenient can-fail point to hook it in.
		 */
		ret = dma_resv_reserve_fences(obj->resv, 1);
		if (ret)
			return ret;

		/* exclusive fences must be ordered */
		if (no_implicit && !write)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
							      obj,
							      write);
		if (ret)
			break;
	}

	return ret;
}

static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;
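
	/*
	 * BO_VALID tracks whether userspace's presumed iova for a bo is
	 * still correct; if every bo checks out, submit->valid stays true
	 * and the reloc-patching pass is skipped entirely:
	 */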

	/*
	 * Increment active_count first, so if under memory pressure, we
	 * don't inadvertently evict a bo needed by the submit in order
	 * to pin an earlier bo in the same submit.
	 */
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_active_get(obj, submit->gpu);
		submit->bos[i].flags |= BO_ACTIVE;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		struct msm_gem_vma *vma;

		/* if locking succeeded, pin bo: */
		vma = msm_gem_get_vma_locked(obj, submit->aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			break;
		}

		ret = msm_gem_pin_vma_locked(obj, vma);
		if (ret)
			break;

		submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
		submit->bos[i].vma = vma;

		if (vma->iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = vma->iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}

static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_WRITE);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_READ);
	}
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;
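
		/* 'shift' is the left-shift applied to the resolved iova
		 * (negative means shift right), e.g. so the two halves of
		 * a 64-bit address can be written by two separate relocs:
		 */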
		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(&obj->base);

	return ret;
}

/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	unsigned cleanup_flags = BO_LOCKED;
	unsigned i;

	if (error)
		cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_cleanup_bo(submit, i, cleanup_flags);
		if (error)
			drm_gem_object_put(&msm_obj->base);
	}
}

void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_lock(obj);
		/* Note, VMA already fence-unpinned before submit: */
		submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
		msm_gem_unlock(obj);
		drm_gem_object_put(obj);
	}
}

struct msm_submit_post_dep {
	struct drm_syncobj *syncobj;
	uint64_t point;
	struct dma_fence_chain *chain;
};

static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
                                           struct drm_file *file,
                                           uint64_t in_syncobjs_addr,
                                           uint32_t nr_in_syncobjs,
                                           size_t syncobj_stride,
                                           struct msm_ringbuffer *ring)
{
	struct drm_syncobj **syncobjs = NULL;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
	                   GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!syncobjs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_in_syncobjs; ++i) {
		uint64_t address = in_syncobjs_addr + i * syncobj_stride;
		struct dma_fence *fence;
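
		/* Copying min(stride, our struct size) tolerates both older
		 * (smaller) and newer (larger) userspace structs; unknown
		 * trailing fields are simply ignored:
		 */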
		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.point &&
		    !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
		                             syncobj_desc.point, 0, &fence);
		if (ret)
			break;

		ret = drm_sched_job_add_dependency(&submit->base, fence);
		if (ret)
			break;

		if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
			syncobjs[i] =
				drm_syncobj_find(file, syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			if (syncobjs[j])
				drm_syncobj_put(syncobjs[j]);
		}
		kfree(syncobjs);
		return ERR_PTR(ret);
	}

	return syncobjs;
}

static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
                               uint32_t nr_syncobjs)
{
	uint32_t i;

	for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
		if (syncobjs[i])
			drm_syncobj_replace_fence(syncobjs[i], NULL);
	}
}

static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
                                                       struct drm_file *file,
                                                       uint64_t syncobjs_addr,
                                                       uint32_t nr_syncobjs,
                                                       size_t syncobj_stride)
{
	struct msm_submit_post_dep *post_deps;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	/* zero-initialize, since the unwind loop below may inspect
	 * entries that were never written:
	 */
	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
	                    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!post_deps)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_syncobjs; ++i) {
		uint64_t address = syncobjs_addr + i * syncobj_stride;

		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;
		post_deps[i].chain = NULL;

		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		if (syncobj_desc.point) {
			if (!drm_core_check_feature(dev,
			                            DRIVER_SYNCOBJ_TIMELINE)) {
				ret = -EOPNOTSUPP;
				break;
			}
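
			/* Pre-allocate the fence-chain node now: attaching
			 * the timeline point after the job is armed must
			 * not be able to fail:
			 */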
			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj =
			drm_syncobj_find(file, syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			dma_fence_chain_free(post_deps[j].chain);
			if (post_deps[j].syncobj)
				drm_syncobj_put(post_deps[j].syncobj);
		}

		kfree(post_deps);
		return ERR_PTR(ret);
	}

	return post_deps;
}

static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
                                  uint32_t count, struct dma_fence *fence)
{
	uint32_t i;

	for (i = 0; post_deps && i < count; ++i) {
		if (post_deps[i].chain) {
			drm_syncobj_add_point(post_deps[i].syncobj,
			                      post_deps[i].chain,
			                      fence, post_deps[i].point);
			post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(post_deps[i].syncobj,
			                          fence);
		}
	}
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit = NULL;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	struct msm_submit_post_dep *post_deps = NULL;
	struct drm_syncobj **syncobjs_to_reset = NULL;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	bool has_ww_ticket = false;
	unsigned i;
	int ret, submitid;

	if (!gpu)
		return -ENXIO;

	if (args->pad)
		return -EINVAL;

	if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
		DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
		return -EPERM;
	}

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	/* Get a unique identifier for the submission for logging purposes */
	submitid = atomic_inc_return(&ident) - 1;

	ring = gpu->rb[queue->ring_nr];
	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
		args->nr_bos, args->nr_cmds);

	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		goto out_post_unlock;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos,
		args->nr_cmds);
	if (IS_ERR(submit)) {
		ret = PTR_ERR(submit);
		submit = NULL;
		goto out_unlock;
	}

	submit->pid = pid;
	submit->ident = submitid;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = -EINVAL;
			goto out_unlock;
		}

		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
		syncobjs_to_reset = msm_parse_deps(submit, file,
						   args->in_syncobjs,
						   args->nr_in_syncobjs,
						   args->syncobj_stride, ring);
		if (IS_ERR(syncobjs_to_reset)) {
			ret = PTR_ERR(syncobjs_to_reset);
			goto out_unlock;
		}
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
		post_deps = msm_parse_post_deps(dev, file,
						args->out_syncobjs,
						args->nr_out_syncobjs,
						args->syncobj_stride);
		if (IS_ERR(post_deps)) {
			ret = PTR_ERR(post_deps);
			goto out_unlock;
		}
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lookup_cmds(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ww_acquire_init(&submit->ticket, &reservation_ww_class);
	has_ww_ticket = true;
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = submit_bo(submit, submit->cmd[i].idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (!submit->cmd[i].size ||
			((submit->cmd[i].size + submit->cmd[i].offset) >
				msm_obj->base.size / 4)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
				submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	/*
	 * If using userspace provided seqno fence, validate that the id
	 * is available before arming sched job.  Since access to fence_idr
	 * is serialized on the queue lock, the slot should still be
	 * available after the job is armed:
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
			idr_find(&queue->fence_idr, args->fence)) {
		ret = -EINVAL;
		goto out;
	}

	drm_sched_job_arm(&submit->base);
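
	/*
	 * The job is now armed: the scheduler fences exist, and the job
	 * must be pushed to its entity rather than torn down.
	 */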
	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

	if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
		/*
		 * Userspace has assigned the seqno fence that it wants
		 * us to use.  It is an error to pick a fence sequence
		 * number that is not available.
		 */
		submit->fence_id = args->fence;
		ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
				    &submit->fence_id, submit->fence_id,
				    GFP_KERNEL);
		/*
		 * We've already validated that the fence_id slot is valid,
		 * so if idr_alloc_u32 failed, it is a kernel bug
		 */
		WARN_ON(ret);
	} else {
		/*
		 * Allocate an id which can be used by WAIT_FENCE ioctl to map
		 * back to the underlying fence.
		 */
		submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
						    submit->user_fence, 1,
						    INT_MAX, GFP_KERNEL);
	}

	if (submit->fence_id < 0) {
		ret = submit->fence_id;
		submit->fence_id = 0;
	}

	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		struct sync_file *sync_file = sync_file_create(submit->user_fence);
		if (!sync_file) {
			ret = -ENOMEM;
		} else {
			fd_install(out_fence_fd, sync_file->file);
			args->fence_fd = out_fence_fd;
		}
	}

	submit_attach_object_fences(submit);

	/* The scheduler owns a ref now: */
	msm_gem_submit_get(submit);

	drm_sched_entity_push_job(&submit->base);

	args->fence = submit->fence_id;
	queue->last_fence = submit->fence_id;

	msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
	msm_process_post_deps(post_deps, args->nr_out_syncobjs,
			      submit->user_fence);

out:
	submit_cleanup(submit, !!ret);
	if (has_ww_ticket)
		ww_acquire_fini(&submit->ticket);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&queue->lock);
	if (submit)
		msm_gem_submit_put(submit);
out_post_unlock:
	if (!IS_ERR_OR_NULL(post_deps)) {
		for (i = 0; i < args->nr_out_syncobjs; ++i) {
			kfree(post_deps[i].chain);
			drm_syncobj_put(post_deps[i].syncobj);
		}
		kfree(post_deps);
	}

	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
		for (i = 0; i < args->nr_in_syncobjs; ++i) {
			if (syncobjs_to_reset[i])
				drm_syncobj_put(syncobjs_to_reset[i]);
		}
		kfree(syncobjs_to_reset);
	}

	return ret;
}