1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include <drm/drmP.h>
29 #include "vmwgfx_drv.h"
30
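/*
 * Fence seqnos are 32-bit and wrap around. A fence counts as passed when the
 * unsigned difference (device seqno - fence seqno) is less than
 * VMW_FENCE_WRAP, i.e. when the fence seqno lies at most 2^31 - 1 behind the
 * latest seqno the device has reported. For example, a device seqno of 5
 * signals a fence with seqno 0xfffffffe, since 5 - 0xfffffffe == 7 mod 2^32.
 */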
31 #define VMW_FENCE_WRAP (1 << 31)
32
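/**
 * struct vmw_fence_manager - Per-device fence bookkeeping.
 *
 * @num_fence_objects: Number of fence objects currently on @fence_list.
 * @dev_priv: Pointer back to the device private structure.
 * @lock: Protects the lists and manager state below. Also handed to
 * dma_fence_init() as the lock of every fence this manager creates.
 * @fence_list: Fence objects that have not yet signaled, in submission order.
 * @work: Work item that processes @cleanup_list and switches off the fence
 * goal irq outside atomic context.
 * @user_fence_size: Accounted size of a struct vmw_user_fence.
 * @fence_size: Accounted size of a struct vmw_fence_obj.
 * @event_fence_action_size: Accounted size of a struct vmw_event_fence_action.
 * @fifo_down: True while the fifo is down; fence creation then fails.
 * @cleanup_list: Executed fence actions awaiting cleanup by @work.
 * @pending_actions: Per action type, the number of actions attached to fences
 * but not yet executed.
 * @goal_irq_mutex: Serializes @goal_irq_on updates and fence goal irq
 * enabling / disabling.
 * @goal_irq_on: Whether the fence goal irq is currently requested.
 * @seqno_valid: Whether the fence goal FIFO register holds the seqno of a
 * fence with actions attached.
 * @ctx: The dma_fence context used for all fences created by this manager.
 */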
33 struct vmw_fence_manager {
34         int num_fence_objects;
35         struct vmw_private *dev_priv;
36         spinlock_t lock;
37         struct list_head fence_list;
38         struct work_struct work;
39         u32 user_fence_size;
40         u32 fence_size;
41         u32 event_fence_action_size;
42         bool fifo_down;
43         struct list_head cleanup_list;
44         uint32_t pending_actions[VMW_ACTION_MAX];
45         struct mutex goal_irq_mutex;
46         bool goal_irq_on; /* Protected by @goal_irq_mutex */
47         bool seqno_valid; /* Protected by @lock, and may not be set to true
48                              without the @goal_irq_mutex held. */
49         u64 ctx;
50 };
51
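/**
 * struct vmw_user_fence - A fence object with a ttm base object attached,
 * making the fence visible to user-space through a handle.
 *
 * @base: The ttm base object.
 * @fence: The fence object itself.
 */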
52 struct vmw_user_fence {
53         struct ttm_base_object base;
54         struct vmw_fence_obj fence;
55 };
56
57 /**
58  * struct vmw_event_fence_action - fence action that delivers a drm event.
59  *
60  * @action: A struct vmw_fence_action to hook up to a fence.
61  * @event: A pointer to the pending drm event to deliver when the fence
62  * signals; set to NULL once the event has been sent.
63  * @fence: A referenced pointer to the fence to keep it alive while @action
64  * hangs on it.
65  * @dev: Pointer to a struct drm_device so we can access the event lock
66  * when sending the event.
67  * @tv_sec: If non-NULL, the variable pointed to will be assigned the
68  * current time tv_sec value when the fence signals.
69  * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
70  * be assigned the current time tv_usec value when the fence signals.
71  */
72 struct vmw_event_fence_action {
73         struct vmw_fence_action action;
74
75         struct drm_pending_event *event;
76         struct vmw_fence_obj *fence;
77         struct drm_device *dev;
78
79         uint32_t *tv_sec;
80         uint32_t *tv_usec;
81 };
82
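/**
 * fman_from_fence - Return the fence manager that owns a fence.
 *
 * @fence: Pointer to the fence object.
 *
 * The embedded dma_fence is initialized with a pointer to the manager's
 * @lock member (see vmw_fence_obj_init()), so container_of() on that lock
 * pointer recovers the owning struct vmw_fence_manager.
 */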
83 static struct vmw_fence_manager *
84 fman_from_fence(struct vmw_fence_obj *fence)
85 {
86         return container_of(fence->base.lock, struct vmw_fence_manager, lock);
87 }
88
89 /**
90  * Note on fencing subsystem usage of irqs:
91  * Typically the vmw_fences_update() function is called:
92  *
93  * a) When a new fence seqno has been submitted by the fifo code.
94  * b) On demand when we have waiters. Sleeping waiters will switch on the
95  * ANY_FENCE irq and call vmw_fences_update() each time an ANY_FENCE
96  * irq is received. When the last fence waiter is gone, that irq is masked
97  * away.
98  *
99  * In situations where there are no waiters and we don't submit any new fences,
100  * fence objects may not be signaled. This is perfectly OK, since there are
101  * no consumers of the signaled data, but that is NOT OK when there are fence
102  * actions attached to a fence. The fencing subsystem then makes use of the
103  * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
104  * which has an action attached, and each time vmw_fences_update() is called,
105  * the subsystem makes sure the fence goal seqno is updated.
106  *
107  * The fence goal seqno irq is on as long as there are unsignaled fence
108  * objects with actions attached to them.
109  */
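
/*
 * Summary of the action machinery implemented below: an action is attached
 * with vmw_fence_obj_add_action(), which may program the fence goal seqno
 * and switch on the fence goal irq. Once vmw_fences_update() notices that
 * the fence seqno has passed, the action's seq_passed() callback is run and
 * the action is moved to the cleanup list, where the worker
 * (vmw_fence_work_func()) runs its cleanup() callback outside atomic context.
 */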
110
111 static void vmw_fence_obj_destroy(struct dma_fence *f)
112 {
113         struct vmw_fence_obj *fence =
114                 container_of(f, struct vmw_fence_obj, base);
115
116         struct vmw_fence_manager *fman = fman_from_fence(fence);
117
118         spin_lock(&fman->lock);
119         list_del_init(&fence->head);
120         --fman->num_fence_objects;
121         spin_unlock(&fman->lock);
122         fence->destroy(fence);
123 }
124
125 static const char *vmw_fence_get_driver_name(struct dma_fence *f)
126 {
127         return "vmwgfx";
128 }
129
130 static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
131 {
132         return "svga";
133 }
134
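/**
 * vmw_fence_enable_signaling - The dma_fence_ops enable_signaling callback.
 *
 * @f: The embedded struct dma_fence.
 *
 * Return: False if the device has already passed the fence seqno, in which
 * case the dma_fence core signals the fence itself. Otherwise the host is
 * pinged so the fifo keeps being processed, and true is returned, leaving
 * the actual signaling to vmw_fences_update().
 */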
135 static bool vmw_fence_enable_signaling(struct dma_fence *f)
136 {
137         struct vmw_fence_obj *fence =
138                 container_of(f, struct vmw_fence_obj, base);
139
140         struct vmw_fence_manager *fman = fman_from_fence(fence);
141         struct vmw_private *dev_priv = fman->dev_priv;
142
143         u32 *fifo_mem = dev_priv->mmio_virt;
144         u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
145         if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
146                 return false;
147
148         vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
149
150         return true;
151 }
152
153 struct vmwgfx_wait_cb {
154         struct dma_fence_cb base;
155         struct task_struct *task;
156 };
157
158 static void
159 vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
160 {
161         struct vmwgfx_wait_cb *wait =
162                 container_of(cb, struct vmwgfx_wait_cb, base);
163
164         wake_up_process(wait->task);
165 }
166
167 static void __vmw_fences_update(struct vmw_fence_manager *fman);
168
169 static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
170 {
171         struct vmw_fence_obj *fence =
172                 container_of(f, struct vmw_fence_obj, base);
173
174         struct vmw_fence_manager *fman = fman_from_fence(fence);
175         struct vmw_private *dev_priv = fman->dev_priv;
176         struct vmwgfx_wait_cb cb;
177         long ret = timeout;
178
179         if (likely(vmw_fence_obj_signaled(fence)))
180                 return timeout;
181
182         vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
183         vmw_seqno_waiter_add(dev_priv);
184
185         spin_lock(f->lock);
186
187         if (intr && signal_pending(current)) {
188                 ret = -ERESTARTSYS;
189                 goto out;
190         }
191
192         cb.base.func = vmwgfx_wait_cb;
193         cb.task = current;
194         list_add(&cb.base.node, &f->cb_list);
195
196         for (;;) {
197                 __vmw_fences_update(fman);
198
199                 /*
200                  * We can use the barrier-free __set_current_state() since
201                  * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
202                  * fence spinlock.
203                  */
204                 if (intr)
205                         __set_current_state(TASK_INTERRUPTIBLE);
206                 else
207                         __set_current_state(TASK_UNINTERRUPTIBLE);
208
209                 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
210                         if (ret == 0 && timeout > 0)
211                                 ret = 1;
212                         break;
213                 }
214
215                 if (intr && signal_pending(current)) {
216                         ret = -ERESTARTSYS;
217                         break;
218                 }
219
220                 if (ret == 0)
221                         break;
222
223                 spin_unlock(f->lock);
224
225                 ret = schedule_timeout(ret);
226
227                 spin_lock(f->lock);
228         }
229         __set_current_state(TASK_RUNNING);
230         if (!list_empty(&cb.base.node))
231                 list_del(&cb.base.node);
232
233 out:
234         spin_unlock(f->lock);
235
236         vmw_seqno_waiter_remove(dev_priv);
237
238         return ret;
239 }
240
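/*
 * These ops plug vmwgfx fences into the common dma_fence machinery:
 * dma_fence_wait_timeout() on &vmw_fence_obj.base dispatches to
 * vmw_fence_wait(), dma_fence_add_callback() enables signaling through
 * vmw_fence_enable_signaling(), and the final dma_fence_put() releases the
 * object through vmw_fence_obj_destroy().
 */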
241 static const struct dma_fence_ops vmw_fence_ops = {
242         .get_driver_name = vmw_fence_get_driver_name,
243         .get_timeline_name = vmw_fence_get_timeline_name,
244         .enable_signaling = vmw_fence_enable_signaling,
245         .wait = vmw_fence_wait,
246         .release = vmw_fence_obj_destroy,
247 };
248
249
250 /**
251  * vmw_fence_work_func - Clean up actions attached to recently signaled
252  * fences, and switch off the fence goal irq when it is no longer needed.
253  * This is done from a workqueue so we don't have to do it in atomic context.
254  */
255
256 static void vmw_fence_work_func(struct work_struct *work)
257 {
258         struct vmw_fence_manager *fman =
259                 container_of(work, struct vmw_fence_manager, work);
260         struct list_head list;
261         struct vmw_fence_action *action, *next_action;
262         bool seqno_valid;
263
264         do {
265                 INIT_LIST_HEAD(&list);
266                 mutex_lock(&fman->goal_irq_mutex);
267
268                 spin_lock(&fman->lock);
269                 list_splice_init(&fman->cleanup_list, &list);
270                 seqno_valid = fman->seqno_valid;
271                 spin_unlock(&fman->lock);
272
273                 if (!seqno_valid && fman->goal_irq_on) {
274                         fman->goal_irq_on = false;
275                         vmw_goal_waiter_remove(fman->dev_priv);
276                 }
277                 mutex_unlock(&fman->goal_irq_mutex);
278
279                 if (list_empty(&list))
280                         return;
281
282                 /*
283                  * At this point, only we should be able to manipulate the
284                  * list heads of the actions we have on the private list,
285                  * hence there is no need to hold fman->lock here.
286                  */
287
288                 list_for_each_entry_safe(action, next_action, &list, head) {
289                         list_del_init(&action->head);
290                         if (action->cleanup)
291                                 action->cleanup(action);
292                 }
293         } while (1);
294 }
295
296 struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
297 {
298         struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
299
300         if (unlikely(!fman))
301                 return NULL;
302
303         fman->dev_priv = dev_priv;
304         spin_lock_init(&fman->lock);
305         INIT_LIST_HEAD(&fman->fence_list);
306         INIT_LIST_HEAD(&fman->cleanup_list);
307         INIT_WORK(&fman->work, &vmw_fence_work_func);
308         fman->fifo_down = true;
309         fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
310         fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
311         fman->event_fence_action_size =
312                 ttm_round_pot(sizeof(struct vmw_event_fence_action));
313         mutex_init(&fman->goal_irq_mutex);
314         fman->ctx = dma_fence_context_alloc(1);
315
316         return fman;
317 }
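
/*
 * A minimal usage sketch (error handling abbreviated): the manager is set up
 * once per device and torn down again when the device goes away.
 *
 *	fman = vmw_fence_manager_init(dev_priv);
 *	if (unlikely(!fman))
 *		return -ENOMEM;
 *	...
 *	vmw_fence_manager_takedown(fman);
 */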
318
319 void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
320 {
321         bool lists_empty;
322
323         (void) cancel_work_sync(&fman->work);
324
325         spin_lock(&fman->lock);
326         lists_empty = list_empty(&fman->fence_list) &&
327                 list_empty(&fman->cleanup_list);
328         spin_unlock(&fman->lock);
329
330         BUG_ON(!lists_empty);
331         kfree(fman);
332 }
333
334 static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
335                               struct vmw_fence_obj *fence, u32 seqno,
336                               void (*destroy) (struct vmw_fence_obj *fence))
337 {
338         int ret = 0;
339
340         dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
341                        fman->ctx, seqno);
342         INIT_LIST_HEAD(&fence->seq_passed_actions);
343         fence->destroy = destroy;
344
345         spin_lock(&fman->lock);
346         if (unlikely(fman->fifo_down)) {
347                 ret = -EBUSY;
348                 goto out_unlock;
349         }
350         list_add_tail(&fence->head, &fman->fence_list);
351         ++fman->num_fence_objects;
352
353 out_unlock:
354         spin_unlock(&fman->lock);
355         return ret;
356
357 }
358
359 static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
360                                 struct list_head *list)
361 {
362         struct vmw_fence_action *action, *next_action;
363
364         list_for_each_entry_safe(action, next_action, list, head) {
365                 list_del_init(&action->head);
366                 fman->pending_actions[action->type]--;
367                 if (action->seq_passed != NULL)
368                         action->seq_passed(action);
369
370                 /*
371                  * Add the cleanup action to the cleanup list so that
372                  * it will be performed by a worker task.
373                  */
374
375                 list_add_tail(&action->head, &fman->cleanup_list);
376         }
377 }
378
379 /**
380  * vmw_fence_goal_new_locked - Figure out a new device fence goal
381  * seqno if needed.
382  *
383  * @fman: Pointer to a fence manager.
384  * @passed_seqno: The seqno the device currently signals as passed.
385  *
386  * This function should be called with the fence manager lock held.
387  * It is typically called when we have a new passed_seqno, and
388  * we might need to update the fence goal. It checks to see whether
389  * the current fence goal has already passed, and, in that case,
390  * scans through all unsignaled fences to get the next fence object with an
391  * action attached, and sets the seqno of that fence as a new fence goal.
392  *
393  * Return: True if the device goal seqno was updated, false otherwise.
394  */
395 static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
396                                       u32 passed_seqno)
397 {
398         u32 goal_seqno;
399         u32 *fifo_mem;
400         struct vmw_fence_obj *fence;
401
402         if (likely(!fman->seqno_valid))
403                 return false;
404
405         fifo_mem = fman->dev_priv->mmio_virt;
406         goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
407         if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
408                 return false;
409
410         fman->seqno_valid = false;
411         list_for_each_entry(fence, &fman->fence_list, head) {
412                 if (!list_empty(&fence->seq_passed_actions)) {
413                         fman->seqno_valid = true;
414                         vmw_mmio_write(fence->base.seqno,
415                                        fifo_mem + SVGA_FIFO_FENCE_GOAL);
416                         break;
417                 }
418         }
419
420         return true;
421 }
422
423
424 /**
425  * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
426  * needed.
427  *
428  * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
429  * considered as a device fence goal.
430  *
431  * This function should be called with the fence manager lock held.
432  * It is typically called when an action has been attached to a fence to
433  * check whether the seqno of that fence should be used for a fence
434  * goal interrupt. This is typically needed if the current fence goal is
435  * invalid, or has a higher seqno than that of the current fence object.
436  *
437  * Return: True if the device goal seqno was updated, false otherwise.
438  */
439 static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
440 {
441         struct vmw_fence_manager *fman = fman_from_fence(fence);
442         u32 goal_seqno;
443         u32 *fifo_mem;
444
445         if (dma_fence_is_signaled_locked(&fence->base))
446                 return false;
447
448         fifo_mem = fman->dev_priv->mmio_virt;
449         goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
450         if (likely(fman->seqno_valid &&
451                    goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
452                 return false;
453
454         vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
455         fman->seqno_valid = true;
456
457         return true;
458 }
459
460 static void __vmw_fences_update(struct vmw_fence_manager *fman)
461 {
462         struct vmw_fence_obj *fence, *next_fence;
463         struct list_head action_list;
464         bool needs_rerun;
465         uint32_t seqno, new_seqno;
466         u32 *fifo_mem = fman->dev_priv->mmio_virt;
467
468         seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
469 rerun:
470         list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
471                 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
472                         list_del_init(&fence->head);
473                         dma_fence_signal_locked(&fence->base);
474                         INIT_LIST_HEAD(&action_list);
475                         list_splice_init(&fence->seq_passed_actions,
476                                          &action_list);
477                         vmw_fences_perform_actions(fman, &action_list);
478                 } else
479                         break;
480         }
481
482         /*
483          * Rerun if the fence goal seqno was updated, and the
484          * hardware might have raced with that update, so that
485          * we missed a fence_goal irq.
486          */
487
488         needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
489         if (unlikely(needs_rerun)) {
490                 new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
491                 if (new_seqno != seqno) {
492                         seqno = new_seqno;
493                         goto rerun;
494                 }
495         }
496
497         if (!list_empty(&fman->cleanup_list))
498                 (void) schedule_work(&fman->work);
499 }
500
501 void vmw_fences_update(struct vmw_fence_manager *fman)
502 {
503         spin_lock(&fman->lock);
504         __vmw_fences_update(fman);
505         spin_unlock(&fman->lock);
506 }
507
508 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
509 {
510         struct vmw_fence_manager *fman = fman_from_fence(fence);
511
512         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
513                 return 1;
514
515         vmw_fences_update(fman);
516
517         return dma_fence_is_signaled(&fence->base);
518 }
519
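/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * @fence: The fence to wait on.
 * @lazy: Currently ignored by this implementation.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Return: Zero on success, -EBUSY if the wait timed out, or another negative
 * error code such as -ERESTARTSYS if the wait was interrupted.
 */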
520 int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
521                        bool interruptible, unsigned long timeout)
522 {
523         long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
524
525         if (likely(ret > 0))
526                 return 0;
527         else if (ret == 0)
528                 return -EBUSY;
529         else
530                 return ret;
531 }
532
533 void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
534 {
535         struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
536
537         vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
538 }
539
540 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
541 {
542         dma_fence_free(&fence->base);
543 }
544
545 int vmw_fence_create(struct vmw_fence_manager *fman,
546                      uint32_t seqno,
547                      struct vmw_fence_obj **p_fence)
548 {
549         struct vmw_fence_obj *fence;
550         int ret;
551
552         fence = kzalloc(sizeof(*fence), GFP_KERNEL);
553         if (unlikely(!fence))
554                 return -ENOMEM;
555
556         ret = vmw_fence_obj_init(fman, fence, seqno,
557                                  vmw_fence_destroy);
558         if (unlikely(ret != 0))
559                 goto out_err_init;
560
561         *p_fence = fence;
562         return 0;
563
564 out_err_init:
565         kfree(fence);
566         return ret;
567 }
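
/*
 * A sketch of typical in-kernel use, assuming @fman and a just-submitted
 * @seqno are at hand; the caller drops its reference when done:
 *
 *	struct vmw_fence_obj *fence;
 *	int ret = vmw_fence_create(fman, seqno, &fence);
 *
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = vmw_fence_obj_wait(fence, false, true, VMW_FENCE_WAIT_TIMEOUT);
 *	vmw_fence_obj_unreference(&fence);
 */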
568
569
570 static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
571 {
572         struct vmw_user_fence *ufence =
573                 container_of(fence, struct vmw_user_fence, fence);
574         struct vmw_fence_manager *fman = fman_from_fence(fence);
575
576         ttm_base_object_kfree(ufence, base);
577         /*
578          * Free kernel space accounting.
579          */
580         ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
581                             fman->user_fence_size);
582 }
583
584 static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
585 {
586         struct ttm_base_object *base = *p_base;
587         struct vmw_user_fence *ufence =
588                 container_of(base, struct vmw_user_fence, base);
589         struct vmw_fence_obj *fence = &ufence->fence;
590
591         *p_base = NULL;
592         vmw_fence_obj_unreference(&fence);
593 }
594
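/**
 * vmw_user_fence_create - Create a fence object with an associated
 * user-space visible ttm base object.
 *
 * @file_priv: Identifies the calling file.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno at which the fence signals.
 * @p_fence: Location in which to return a refcounted pointer to the fence.
 * @p_handle: Location in which to return the handle that user-space passes
 * to the fence wait / signaled / unref ioctls below.
 *
 * Return: Zero on success, negative error code otherwise.
 */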
595 int vmw_user_fence_create(struct drm_file *file_priv,
596                           struct vmw_fence_manager *fman,
597                           uint32_t seqno,
598                           struct vmw_fence_obj **p_fence,
599                           uint32_t *p_handle)
600 {
601         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
602         struct vmw_user_fence *ufence;
603         struct vmw_fence_obj *tmp;
604         struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
605         struct ttm_operation_ctx ctx = {
606                 .interruptible = false,
607                 .no_wait_gpu = false
608         };
609         int ret;
610
611         /*
612          * Kernel memory space accounting, since this object may
613          * be created by a user-space request.
614          */
615
616         ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
617                                    &ctx);
618         if (unlikely(ret != 0))
619                 return ret;
620
621         ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
622         if (unlikely(!ufence)) {
623                 ret = -ENOMEM;
624                 goto out_no_object;
625         }
626
627         ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
628                                  vmw_user_fence_destroy);
629         if (unlikely(ret != 0)) {
630                 kfree(ufence);
631                 goto out_no_object;
632         }
633
634         /*
635          * The base object holds a reference which is freed in
636          * vmw_user_fence_base_release.
637          */
638         tmp = vmw_fence_obj_reference(&ufence->fence);
639         ret = ttm_base_object_init(tfile, &ufence->base, false,
640                                    VMW_RES_FENCE,
641                                    &vmw_user_fence_base_release, NULL);
642
643
644         if (unlikely(ret != 0)) {
645                 /*
646                  * Free the base object's reference
647                  */
648                 vmw_fence_obj_unreference(&tmp);
649                 goto out_err;
650         }
651
652         *p_fence = &ufence->fence;
653         *p_handle = ufence->base.hash.key;
654
655         return 0;
656 out_err:
657         tmp = &ufence->fence;
658         vmw_fence_obj_unreference(&tmp);
659 out_no_object:
660         ttm_mem_global_free(mem_glob, fman->user_fence_size);
661         return ret;
662 }
663
664
665 /**
666  * vmw_wait_dma_fence - Wait for a dma fence
667  *
668  * @fman: pointer to a fence manager
669  * @fence: DMA fence to wait on
670  *
671  * This function handles the case when the fence is actually a fence
672  * array.  If that's the case, it'll wait on each of the child fences.
673  */
674 int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
675                        struct dma_fence *fence)
676 {
677         struct dma_fence_array *fence_array;
678         int ret = 0;
679         int i;
680
681
682         if (dma_fence_is_signaled(fence))
683                 return 0;
684
685         if (!dma_fence_is_array(fence))
686                 return dma_fence_wait(fence, true);
687
688         /* From i915: Note that if the fence-array was created in
689          * signal-on-any mode, we should *not* decompose it into its individual
690          * fences. However, we don't currently store which mode the fence-array
691          * is operating in. Fortunately, the only user of signal-on-any is
692          * private to amdgpu and we should not see any incoming fence-array
693          * from sync-file being in signal-on-any mode.
694          */
695
696         fence_array = to_dma_fence_array(fence);
697         for (i = 0; i < fence_array->num_fences; i++) {
698                 struct dma_fence *child = fence_array->fences[i];
699
700                 ret = dma_fence_wait(child, true);
701
702                 if (ret < 0)
703                         return ret;
704         }
705
706         return 0;
707 }
708
709
710 /**
711  * vmw_fence_fifo_down - signal all unsignaled fence objects.
712  */
713
714 void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
715 {
716         struct list_head action_list;
717         int ret;
718
719         /*
720          * The list may be altered while we traverse it, so always
721          * restart when we've released the fman->lock.
722          */
723
724         spin_lock(&fman->lock);
725         fman->fifo_down = true;
726         while (!list_empty(&fman->fence_list)) {
727                 struct vmw_fence_obj *fence =
728                         list_entry(fman->fence_list.prev, struct vmw_fence_obj,
729                                    head);
730                 dma_fence_get(&fence->base);
731                 spin_unlock(&fman->lock);
732
733                 ret = vmw_fence_obj_wait(fence, false, false,
734                                          VMW_FENCE_WAIT_TIMEOUT);
735
736                 if (unlikely(ret != 0)) {
737                         list_del_init(&fence->head);
738                         dma_fence_signal(&fence->base);
739                         INIT_LIST_HEAD(&action_list);
740                         list_splice_init(&fence->seq_passed_actions,
741                                          &action_list);
742                         vmw_fences_perform_actions(fman, &action_list);
743                 }
744
745                 BUG_ON(!list_empty(&fence->head));
746                 dma_fence_put(&fence->base);
747                 spin_lock(&fman->lock);
748         }
749         spin_unlock(&fman->lock);
750 }
751
752 void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
753 {
754         spin_lock(&fman->lock);
755         fman->fifo_down = false;
756         spin_unlock(&fman->lock);
757 }
758
759
760 /**
761  * vmw_fence_obj_lookup - Look up a user-space fence object
762  *
763  * @tfile: A struct ttm_object_file identifying the caller.
764  * @handle: A handle identifying the fence object.
765  * Return: A struct vmw_user_fence base ttm object on success or
766  * an error pointer on failure.
767  *
768  * The fence object is looked up and type-checked. The caller needs
769  * to have opened the fence object first, but since that happens on
770  * creation and fence objects aren't shareable, that's not an
771  * issue currently.
772  */
773 static struct ttm_base_object *
774 vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
775 {
776         struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
777
778         if (!base) {
779                 pr_err("Invalid fence object handle 0x%08lx.\n",
780                        (unsigned long)handle);
781                 return ERR_PTR(-EINVAL);
782         }
783
784         if (base->refcount_release != vmw_user_fence_base_release) {
785                 pr_err("Invalid fence object handle 0x%08lx.\n",
786                        (unsigned long)handle);
787                 ttm_base_object_unref(&base);
788                 return ERR_PTR(-EINVAL);
789         }
790
791         return base;
792 }
793
794
795 int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
796                              struct drm_file *file_priv)
797 {
798         struct drm_vmw_fence_wait_arg *arg =
799             (struct drm_vmw_fence_wait_arg *)data;
800         unsigned long timeout;
801         struct ttm_base_object *base;
802         struct vmw_fence_obj *fence;
803         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
804         int ret;
805         uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
806
807         /*
808          * A plain 64-bit division is not available on 32-bit systems, so
809          * approximate a division by 1000000 with shifts.
810          */
811
812         wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
813           (wait_timeout >> 26);
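
        /*
         * The three shifts add up to wait_timeout * (1/2^20 + 1/2^24 - 1/2^26)
         * = wait_timeout * 67 / 2^26, and 2^26 / 67 is roughly 1001625, so
         * this divides by approximately 10^6, erring about 0.16% on the
         * short side.
         */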
814
815         if (!arg->cookie_valid) {
816                 arg->cookie_valid = 1;
817                 arg->kernel_cookie = jiffies + wait_timeout;
818         }
819
820         base = vmw_fence_obj_lookup(tfile, arg->handle);
821         if (IS_ERR(base))
822                 return PTR_ERR(base);
823
824         fence = &(container_of(base, struct vmw_user_fence, base)->fence);
825
826         timeout = jiffies;
827         if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
828                 ret = ((vmw_fence_obj_signaled(fence)) ?
829                        0 : -EBUSY);
830                 goto out;
831         }
832
833         timeout = (unsigned long)arg->kernel_cookie - timeout;
834
835         ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
836
837 out:
838         ttm_base_object_unref(&base);
839
840         /*
841          * Optionally unref the fence object.
842          */
843
844         if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
845                 return ttm_ref_object_base_unref(tfile, arg->handle,
846                                                  TTM_REF_USAGE);
847         return ret;
848 }
849
850 int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
851                                  struct drm_file *file_priv)
852 {
853         struct drm_vmw_fence_signaled_arg *arg =
854                 (struct drm_vmw_fence_signaled_arg *) data;
855         struct ttm_base_object *base;
856         struct vmw_fence_obj *fence;
857         struct vmw_fence_manager *fman;
858         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
859         struct vmw_private *dev_priv = vmw_priv(dev);
860
861         base = vmw_fence_obj_lookup(tfile, arg->handle);
862         if (IS_ERR(base))
863                 return PTR_ERR(base);
864
865         fence = &(container_of(base, struct vmw_user_fence, base)->fence);
866         fman = fman_from_fence(fence);
867
868         arg->signaled = vmw_fence_obj_signaled(fence);
869
870         arg->signaled_flags = arg->flags;
871         spin_lock(&fman->lock);
872         arg->passed_seqno = dev_priv->last_read_seqno;
873         spin_unlock(&fman->lock);
874
875         ttm_base_object_unref(&base);
876
877         return 0;
878 }
879
880
881 int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
882                               struct drm_file *file_priv)
883 {
884         struct drm_vmw_fence_arg *arg =
885                 (struct drm_vmw_fence_arg *) data;
886
887         return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
888                                          arg->handle,
889                                          TTM_REF_USAGE);
890 }
891
892 /**
893  * vmw_event_fence_action_seq_passed
894  *
895  * @action: The struct vmw_fence_action embedded in a struct
896  * vmw_event_fence_action.
897  *
898  * This function is called when the seqno of the fence where @action is
899  * attached has passed. It queues the event on the submitter's event list.
900  * This function is always called from atomic context.
901  */
902 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
903 {
904         struct vmw_event_fence_action *eaction =
905                 container_of(action, struct vmw_event_fence_action, action);
906         struct drm_device *dev = eaction->dev;
907         struct drm_pending_event *event = eaction->event;
908         struct drm_file *file_priv;
909
910
911         if (unlikely(event == NULL))
912                 return;
913
914         file_priv = event->file_priv;
915         spin_lock_irq(&dev->event_lock);
916
917         if (likely(eaction->tv_sec != NULL)) {
918                 struct timespec64 ts;
919
920                 ktime_get_ts64(&ts);
921                 /* monotonic time, so no y2038 overflow */
922                 *eaction->tv_sec = ts.tv_sec;
923                 *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
924         }
925
926         drm_send_event_locked(dev, eaction->event);
927         eaction->event = NULL;
928         spin_unlock_irq(&dev->event_lock);
929 }
930
931 /**
932  * vmw_event_fence_action_cleanup
933  *
934  * @action: The struct vmw_fence_action embedded in a struct
935  * vmw_event_fence_action.
936  *
937  * This function is the struct vmw_fence_action destructor. It's typically
938  * called from a workqueue.
939  */
940 static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
941 {
942         struct vmw_event_fence_action *eaction =
943                 container_of(action, struct vmw_event_fence_action, action);
944
945         vmw_fence_obj_unreference(&eaction->fence);
946         kfree(eaction);
947 }
948
949
950 /**
951  * vmw_fence_obj_add_action - Add an action to a fence object.
952  *
953  * @fence: The fence object.
954  * @action: The action to add.
955  *
956  * Note that the action callbacks may be executed before this function
957  * returns.
958  */
959 static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
960                               struct vmw_fence_action *action)
961 {
962         struct vmw_fence_manager *fman = fman_from_fence(fence);
963         bool run_update = false;
964
965         mutex_lock(&fman->goal_irq_mutex);
966         spin_lock(&fman->lock);
967
968         fman->pending_actions[action->type]++;
969         if (dma_fence_is_signaled_locked(&fence->base)) {
970                 struct list_head action_list;
971
972                 INIT_LIST_HEAD(&action_list);
973                 list_add_tail(&action->head, &action_list);
974                 vmw_fences_perform_actions(fman, &action_list);
975         } else {
976                 list_add_tail(&action->head, &fence->seq_passed_actions);
977
978                 /*
979                  * This function may set fman::seqno_valid, so it must
980                  * be run with the goal_irq_mutex held.
981                  */
982                 run_update = vmw_fence_goal_check_locked(fence);
983         }
984
985         spin_unlock(&fman->lock);
986
987         if (run_update) {
988                 if (!fman->goal_irq_on) {
989                         fman->goal_irq_on = true;
990                         vmw_goal_waiter_add(fman->dev_priv);
991                 }
992                 vmw_fences_update(fman);
993         }
994         mutex_unlock(&fman->goal_irq_mutex);
995
996 }
997
998 /**
999  * vmw_event_fence_action_queue - Post an event for sending when a fence
1000  * object seqno has passed.
1001  *
1002  * @file_priv: The file connection on which the event should be posted.
1003  * @fence: The fence object on which to post the event.
1004  * @event: Event to be posted. This event should've been allocated using
1005  * k[mz]alloc, and should've been completely initialized.
1006  * @tv_sec: If non-NULL, assigned the current time tv_sec value on delivery.
1007  * @tv_usec: Must be set if @tv_sec is set; assigned the tv_usec value.
1008  * @interruptible: Interruptible waits if possible.
1009  *
1010  * As a side effect, the object pointed to by @event may have been freed when
1011  * this function returns; on error the caller needs to free that object.
1012  */
1013 int vmw_event_fence_action_queue(struct drm_file *file_priv,
1014                                  struct vmw_fence_obj *fence,
1015                                  struct drm_pending_event *event,
1016                                  uint32_t *tv_sec,
1017                                  uint32_t *tv_usec,
1018                                  bool interruptible)
1019 {
1020         struct vmw_event_fence_action *eaction;
1021         struct vmw_fence_manager *fman = fman_from_fence(fence);
1022
1023         eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
1024         if (unlikely(!eaction))
1025                 return -ENOMEM;
1026
1027         eaction->event = event;
1028
1029         eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
1030         eaction->action.cleanup = vmw_event_fence_action_cleanup;
1031         eaction->action.type = VMW_ACTION_EVENT;
1032
1033         eaction->fence = vmw_fence_obj_reference(fence);
1034         eaction->dev = fman->dev_priv->dev;
1035         eaction->tv_sec = tv_sec;
1036         eaction->tv_usec = tv_usec;
1037
1038         vmw_fence_obj_add_action(fence, &eaction->action);
1039
1040         return 0;
1041 }
1042
1043 struct vmw_event_fence_pending {
1044         struct drm_pending_event base;
1045         struct drm_vmw_event_fence event;
1046 };
1047
1048 static int vmw_event_fence_action_create(struct drm_file *file_priv,
1049                                   struct vmw_fence_obj *fence,
1050                                   uint32_t flags,
1051                                   uint64_t user_data,
1052                                   bool interruptible)
1053 {
1054         struct vmw_event_fence_pending *event;
1055         struct vmw_fence_manager *fman = fman_from_fence(fence);
1056         struct drm_device *dev = fman->dev_priv->dev;
1057         int ret;
1058
1059         event = kzalloc(sizeof(*event), GFP_KERNEL);
1060         if (unlikely(!event)) {
1061                 DRM_ERROR("Failed to allocate an event.\n");
1062                 ret = -ENOMEM;
1063                 goto out_no_space;
1064         }
1065
1066         event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
1067         event->event.base.length = sizeof(*event);
1068         event->event.user_data = user_data;
1069
1070         ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
1071
1072         if (unlikely(ret != 0)) {
1073                 DRM_ERROR("Failed to allocate event space for this file.\n");
1074                 kfree(event);
1075                 goto out_no_space;
1076         }
1077
1078         if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
1079                 ret = vmw_event_fence_action_queue(file_priv, fence,
1080                                                    &event->base,
1081                                                    &event->event.tv_sec,
1082                                                    &event->event.tv_usec,
1083                                                    interruptible);
1084         else
1085                 ret = vmw_event_fence_action_queue(file_priv, fence,
1086                                                    &event->base,
1087                                                    NULL,
1088                                                    NULL,
1089                                                    interruptible);
1090         if (ret != 0)
1091                 goto out_no_queue;
1092
1093         return 0;
1094
1095 out_no_queue:
1096         drm_event_cancel_free(dev, &event->base);
1097 out_no_space:
1098         return ret;
1099 }
1100
1101 int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1102                           struct drm_file *file_priv)
1103 {
1104         struct vmw_private *dev_priv = vmw_priv(dev);
1105         struct drm_vmw_fence_event_arg *arg =
1106                 (struct drm_vmw_fence_event_arg *) data;
1107         struct vmw_fence_obj *fence = NULL;
1108         struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1109         struct ttm_object_file *tfile = vmw_fp->tfile;
1110         struct drm_vmw_fence_rep __user *user_fence_rep =
1111                 (struct drm_vmw_fence_rep __user *)(unsigned long)
1112                 arg->fence_rep;
1113         uint32_t handle;
1114         int ret;
1115
1116         /*
1117          * Look up an existing fence object,
1118          * and if user-space wants a new reference,
1119          * add one.
1120          */
1121         if (arg->handle) {
1122                 struct ttm_base_object *base =
1123                         vmw_fence_obj_lookup(tfile, arg->handle);
1124
1125                 if (IS_ERR(base))
1126                         return PTR_ERR(base);
1127
1128                 fence = &(container_of(base, struct vmw_user_fence,
1129                                        base)->fence);
1130                 (void) vmw_fence_obj_reference(fence);
1131
1132                 if (user_fence_rep != NULL) {
1133                         ret = ttm_ref_object_add(vmw_fp->tfile, base,
1134                                                  TTM_REF_USAGE, NULL, false);
1135                         if (unlikely(ret != 0)) {
1136                                 DRM_ERROR("Failed to reference a fence "
1137                                           "object.\n");
1138                                 goto out_no_ref_obj;
1139                         }
1140                         handle = base->hash.key;
1141                 }
1142                 ttm_base_object_unref(&base);
1143         }
1144
1145         /*
1146          * Create a new fence object.
1147          */
1148         if (!fence) {
1149                 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
1150                                                  &fence,
1151                                                  (user_fence_rep) ?
1152                                                  &handle : NULL);
1153                 if (unlikely(ret != 0)) {
1154                         DRM_ERROR("Fence event failed to create fence.\n");
1155                         return ret;
1156                 }
1157         }
1158
1159         BUG_ON(fence == NULL);
1160
1161         ret = vmw_event_fence_action_create(file_priv, fence,
1162                                             arg->flags,
1163                                             arg->user_data,
1164                                             true);
1165         if (unlikely(ret != 0)) {
1166                 if (ret != -ERESTARTSYS)
1167                         DRM_ERROR("Failed to attach event to fence.\n");
1168                 goto out_no_create;
1169         }
1170
1171         vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
1172                                     handle, -1);
1173         vmw_fence_obj_unreference(&fence);
1174         return 0;
1175 out_no_create:
1176         if (user_fence_rep != NULL)
1177                 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1178 out_no_ref_obj:
1179         vmw_fence_obj_unreference(&fence);
1180         return ret;
1181 }