/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/sync_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

/*
 * Cmdstream submission:
 */

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED   0x4000   /* obj's reservation ww_mutex is held */
#define BO_PINNED   0x2000   /* obj has an iova pinned for this submit */

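/*
 * These driver-internal flags are or'd into the same 32-bit flags word in
 * submit->bos[i].flags as the MSM_SUBMIT_BO_READ/WRITE bits copied in from
 * userspace, which is why they are kept up in the high bits.
 */
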
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->gpu = gpu;
	submit->fence = NULL;
	submit->pid = get_pid(task_pid(current));
	submit->cmd = (void *)&submit->bos[nr_bos];

	/* initially, until copy_from_user() and bo lookup succeed: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->node);
	INIT_LIST_HEAD(&submit->bo_list);
	ww_acquire_init(&submit->ticket, &reservation_ww_class);

	return submit;
}

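/*
 * Layout note: the bo and cmd tables are carved out of the same allocation
 * as the submit struct itself,
 *
 *   [ msm_gem_submit | bos[0] .. bos[nr_bos-1] | cmd[0] .. cmd[nr_cmds-1] ]
 *
 * which is why submit->cmd above is simply pointed at the end of the bos
 * array rather than allocated separately.
 */
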
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	dma_fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	kfree(submit);
}

static inline unsigned long __must_check
copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user_inatomic(to, from, n);
	return -EFAULT;
}

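/*
 * The wrapper above lets submit_lookup_objects() attempt the copy while
 * holding file->table_lock (with pagefaults disabled); if that fast-path
 * copy faults, the lock is dropped and the copy is retried with a normal,
 * sleeping copy_from_user().
 */
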
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);
	pagefault_disable();

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * the error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
			pagefault_enable();
			spin_unlock(&file->table_lock);
			if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
				ret = -EFAULT;
				goto out;
			}
			spin_lock(&file->table_lock);
			pagefault_disable();
		}

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = submit_bo.flags;
		/* in submit_pin_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit_bo.handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_reference(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	pagefault_enable();
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}

/* This is where we make sure all the bo's are reserved and locked: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

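/*
 * The fail path above is the standard ww_mutex backoff protocol: on
 * -EDEADLK every lock already held is dropped, the contended mutex is
 * then taken with the slow-path lock (sleeping until its current holder
 * is done with it), and the whole loop is retried with the same ticket.
 */
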
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		if (!write) {
			/* NOTE: _reserve_shared() must happen before
			 * _add_shared_fence(), which makes this a slightly
			 * strange place to call it.  OTOH this is a
			 * convenient can-fail point to hook it in.
			 */
			ret = reservation_object_reserve_shared(msm_obj->resv);
			if (ret)
				return ret;
		}

		if (no_implicit)
			continue;

		ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
		if (ret)
			break;
	}

	return ret;
}

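/*
 * This is the implicit-sync point: unless userspace passed
 * MSM_SUBMIT_NO_IMPLICIT, msm_gem_sync_object() makes the submit wait for
 * fences that other contexts have attached to each bo's reservation
 * object (every fence for a write, only the exclusive fence for a read).
 */
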
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}

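/*
 * BO_VALID tracks the "presumed address" optimization: if every bo is
 * still at the iova userspace last saw (submit_bo.presumed), submit->valid
 * stays true and the relocation pass in the ioctl below can be skipped
 * entirely.
 */
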
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr(&obj->base);

	return ret;
}

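/*
 * The signed shift in the loop above is what lets userspace scatter an
 * iova wider than one dword: for example, a 64-bit address can be emitted
 * as two relocs at adjacent offsets, one with shift 0 for the low word and
 * one with shift -32 for the high word.
 */
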
static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	struct dma_fence *in_fence = NULL;
	struct sync_file *sync_file = NULL;
	int out_fence_fd = -1;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	/* for now, we just have the 3d pipe.. eventually this will need to
	 * be more clever and dispatch to the appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret)
				return ret;
		}
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}
	priv->struct_mutex_task = current;

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
	if (!submit) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if (!submit_cmd.size ||
			((submit_cmd.size + submit_cmd.submit_offset) >
				msm_obj->base.size)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	submit->fence = msm_fence_alloc(gpu->fctx);
	if (IS_ERR(submit->fence)) {
		ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		goto out;
	}

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto out;
		}
	}

	msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence->seqno;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

out:
	if (in_fence)
		dma_fence_put(in_fence);
	submit_cleanup(submit);
	if (ret)
		msm_gem_submit_free(submit);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	priv->struct_mutex_task = NULL;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
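
/*
 * For reference, a rough sketch of the userspace side of a minimal submit;
 * this is not part of this file: cmds_handle, cmds_size, and last_known_iova
 * are assumed to come from an earlier MSM_GEM_NEW plus a cmdstream upload,
 * and all error handling is omitted:
 *
 *	struct drm_msm_gem_submit_bo bo = {
 *		.flags    = MSM_SUBMIT_BO_READ,
 *		.handle   = cmds_handle,
 *		.presumed = last_known_iova,	// 0 if not known
 *	};
 *	struct drm_msm_gem_submit_cmd cmd = {
 *		.type          = MSM_SUBMIT_CMD_BUF,
 *		.submit_idx    = 0,		// index into the bos array
 *		.submit_offset = 0,
 *		.size          = cmds_size,	// in bytes, multiple of 4
 *	};
 *	struct drm_msm_gem_submit req = {
 *		.flags   = MSM_PIPE_3D0,
 *		.nr_bos  = 1,
 *		.bos     = (uintptr_t)&bo,
 *		.nr_cmds = 1,
 *		.cmds    = (uintptr_t)&cmd,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
 *
 * On success, req.fence holds a seqno that can be waited on with the
 * MSM_WAIT_FENCE ioctl.
 */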