// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

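/*
 * Seqnos are 32-bit and wrap. Throughout this file a seqno is considered
 * passed when the unsigned difference (passed_seqno - fence_seqno) is
 * less than VMW_FENCE_WRAP, i.e. lies in the lower half of the u32 range.
 * For example, with passed_seqno == 5 just after a wrap and fence_seqno ==
 * 0xfffffff0, (u32)(5 - 0xfffffff0) == 21 < VMW_FENCE_WRAP, so the fence
 * counts as passed even though the counter has wrapped.
 */
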
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * the current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

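/*
 * dma_fence_init() below is handed &fman->lock as the fence lock, so
 * fence->base.lock always points into the owning manager and
 * container_of() on that pointer recovers the manager.
 */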
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

static u32 vmw_fence_goal_read(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
	else
		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
}

static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
	else
		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

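/*
 * Worked example of the above: the device has passed seqno 5 and an
 * action is attached to the fence with seqno 10.
 * vmw_fence_goal_check_locked() then writes 10 as the fence goal. When
 * the device passes 10 it raises the FENCE_GOAL irq, vmw_fences_update()
 * signals the fence and queues the action, and
 * vmw_fence_goal_new_locked() advances the goal to the next fence with
 * an action attached, if any.
 */
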
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fence_read(dev_priv);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
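/*
 * Note that only the cleanup callbacks are deferred here;
 * vmw_fences_perform_actions() invokes each action's seq_passed callback
 * directly (possibly in atomic context) and then moves the action to
 * fman->cleanup_list for this worker to reap.
 */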
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */
		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */
		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fence_goal_write(fman->dev_priv,
					     fence->base.seqno);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fence_read(fman->dev_priv);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */
	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fence_read(fman->dev_priv);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);

	ttm_base_object_kfree(ufence, base);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	int ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);

	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	return ret;
}

/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */
	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

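/*
 * While fifo_down is set, vmw_fence_obj_init() refuses to add new fences
 * (returning -EBUSY), so the fence list drained by vmw_fence_fifo_down()
 * stays empty until vmw_fence_fifo_up() clears the flag again.
 */
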
/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);
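	/*
	 * Why the shifts work: x/2^20 + x/2^24 - x/2^26 = x * 67/2^26,
	 * and 67/2^26 = 67/67108864 ~= 1/1001625, which is within about
	 * 0.2% of the exact 1/1000000.
	 */
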
	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */
	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * the current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}