/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"
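
/* (Re)arms the hangcheck timer.  Each call pushes the deadline out to
 * roughly 100ms from now; vc4_hangcheck_elapsed() decides whether the
 * GPU actually made progress when the timer fires.
 */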
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}
struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};
static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put_unlocked(state->bo[i]);

	kfree(state);
}
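
/* Returns the hang state recorded by vc4_save_hang_state() to userspace.
 *
 * Like most variable-sized ioctls, it is meant to be called twice:
 * first to learn the required bo_count, then with a big-enough array.
 * A rough userspace sketch (fd setup and error handling omitted; this
 * is an illustration, not code from this tree):
 *
 *	struct drm_vc4_get_hang_state get = { 0 };
 *	struct drm_vc4_get_hang_state_bo *bos;
 *
 *	ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 *	bos = calloc(get.bo_count, sizeof(*bos));
 *	get.bo = (uintptr_t)bos;
 *	ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 */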
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);
		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}
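
/* Captures the state of a GPU hang for the ioctl above: a reference to
 * every BO belonging to the hung bin/render jobs, plus a snapshot of
 * the V3D control list and debug registers.  Runs from the reset work,
 * taking the BO references under job_lock so they can't be freed while
 * we record them.
 */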
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}
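
/* Process-context half of the hangcheck: snapshot the hang state while
 * the hung jobs are still on the queues, then reset the GPU.  Both
 * steps can sleep, which is why the timer defers to this work item.
 */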
static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}
static void
vc4_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}
static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}
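
/* Open-coded wait_event() loop: sleep on job_wait_queue (woken from the
 * IRQ path as jobs retire) until finished_seqno catches up with @seqno,
 * the timeout expires (-ETIME), or, when @interruptible, a signal
 * arrives (-ERESTARTSYS).  A timeout_ns of ~0ull means wait forever.
 */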
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}
static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		vc4_move_job_to_render(dev, exec);
		goto again;
	}
}
void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}
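
/* Stamps every BO referenced by the job with its seqno and attaches the
 * job's fence to each BO's reservation object: shared (read) fences on
 * the ordinary BOs, an exclusive (write) fence on the BOs the render CL
 * will write.  Later submits wait on write_seqno (see bin_dep_seqno in
 * vc4_get_bcl()) before reading those buffers.
 */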
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		reservation_object_add_shared_fence(bo->resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		reservation_object_add_excl_fence(bo->resv, exec->fence);
	}
}
static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}
/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct vc4_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_vc4_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_vc4_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}
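
/* The retry loop above follows the standard ww_mutex backoff protocol:
 * on -EDEADLK we drop every lock we hold, sleep on the contended lock
 * with ww_mutex_lock_slow_interruptible(), and retry.  That way two
 * submits locking overlapping BO sets in different orders can't
 * deadlock each other.
 */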
/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no job was executing, kick ours off.  Otherwise, it'll
	 * get started when the previous job's flush done interrupt
	 * occurs.
	 */
	if (vc4_first_bin_job(vc4) == exec) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}
/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at vc4_complete_exec() time.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}
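
/* Copies in the user's binner command list, shader records, and
 * uniforms, then packs them into a single "exec" BO laid out as:
 *
 *	+------------------+ 0
 *	| bin command list |
 *	+------------------+ shader_rec_offset (16-byte aligned)
 *	| shader records   |
 *	+------------------+ uniforms_offset
 *	| uniforms         |
 *	+------------------+ exec_size
 *
 * The overflow checks below guard the offset arithmetic, since all of
 * these sizes come straight from userspace.
 */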
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++)
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
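
/* Seqno callbacks let other parts of the driver (the async page flip
 * path, for instance) run a function from a workqueue once a seqno has
 * retired, without blocking.  A rough usage sketch (the caller-side
 * names are assumptions, not code from this driver):
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		... runs in workqueue context once the BO is idle ...
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &my_cb, bo->seqno, my_flip_done);
 */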
static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}
/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}
int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}
int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}
/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0) {
			mutex_unlock(&vc4->power_lock);
			vc4->power_refcount--;
			kfree(exec);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}
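
/* From userspace, a submit boils down to filling in drm_vc4_submit_cl
 * and issuing DRM_IOCTL_VC4_SUBMIT_CL; the returned seqno can then be
 * passed to DRM_IOCTL_VC4_WAIT_SEQNO.  A rough sketch (fd setup and the
 * command list contents themselves are assumed):
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl,
 *		.bin_cl_size = bin_cl_size,
 *		.bo_handles = (uintptr_t)handles,
 *		.bo_handle_count = handle_count,
 *		...
 *	};
 *	struct drm_vc4_wait_seqno wait = { .timeout_ns = ~0ull };
 *
 *	ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *	wait.seqno = submit.seqno;
 *	ioctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 */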
void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	setup_timer(&vc4->hangcheck.timer,
		    vc4_hangcheck_elapsed,
		    (unsigned long)dev);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);
}
void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}