/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>

#include <nvif/cl826e.h>
#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct dma_fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

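/*
 * Signal a fence and remove it from the pending list. Called with
 * fctx->lock held. Returns nonzero when the last reference on the
 * non-stall notifier was dropped and the caller must put it.
 */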
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	dma_fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	dma_fence_put(&fence->base);
	return drop;
}

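/*
 * Return the nouveau fence behind @fence if it was emitted by one of this
 * device's channels, or NULL if it comes from another driver or device.
 */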
static struct nouveau_fence *
nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
{
	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < drm->chan.context_base ||
	    fence->context >= drm->chan.context_base + drm->chan.nr)
		return NULL;

	return from_fence(fence);
}

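/*
 * Flush a channel's fence context: signal (and, when @error is set, flag
 * with that error) every fence still pending, so no waiter is left hanging.
 */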
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (error)
			dma_fence_set_error(&fence->base, error);

		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);
}

void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	nouveau_fence_context_kill(fctx, 0);
	nvif_notify_dtor(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

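/*
 * Signal every pending fence whose sequence number the channel has reached;
 * the signed distance check stays correct across 32-bit seqno wrap-around.
 * Returns nonzero if the caller should put the non-stall notifier.
 */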
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

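/*
 * Non-stall interrupt handler: process the pending list and ask the NVIF
 * layer to drop the notifier once nothing is left waiting on it.
 */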
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}

void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = chan->drm->chan.context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_ctor(&chan->user, "fenceNonStallIntr",
			       nouveau_fence_wait_uevent_handler,
			       false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);
	WARN_ON(ret);
}

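/*
 * Initialise the backing dma_fence and emit it to the channel. On success
 * the fence joins fctx->pending holding an extra reference, which is
 * dropped again by nouveau_fence_signal().
 */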
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	else
		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	ret = fctx->emit(fence);
	if (!ret) {
		dma_fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

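/*
 * Check whether a fence has signalled, first processing the channel's
 * pending list so host-side state catches up with the hardware seqno.
 */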
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return dma_fence_is_signaled(&fence->base);
}

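/*
 * Fallback wait for hardware without non-stall interrupts: poll with an
 * exponentially growing sleep, starting at 1 us and capped at 1 ms.
 */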
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = sleep_time;

		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

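/*
 * Busy-wait until the fence signals or its timeout expires; returns -EBUSY
 * on timeout and -ERESTARTSYS when interrupted by a pending signal.
 */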
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

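/*
 * Wait for a fence: busy-spin when @lazy is false, otherwise defer to the
 * dma_fence machinery with a 15 second timeout.
 */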
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

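/*
 * Make @chan wait for the fences attached to the buffer's reservation
 * object. Fences from our own channels can be synchronised on the GPU via
 * fctx->sync(); anything else falls back to a CPU-side dma_fence_wait().
 */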
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		   bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct dma_resv *resv = nvbo->bo.base.resv;
	int i, ret;

	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	/* Waiting for the writes first causes performance regressions
	 * under some circumstances. So manually wait for the reads first.
	 */
	for (i = 0; i < 2; ++i) {
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&cursor, resv,
					dma_resv_usage_rw(exclusive),
					fence) {
			enum dma_resv_usage usage;
			struct nouveau_fence *f;

			usage = dma_resv_iter_usage(&cursor);
			if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
				continue;

			f = nouveau_local_fence(fence, chan->drm);
			if (f) {
				struct nouveau_channel *prev;
				bool must_wait = true;

				rcu_read_lock();
				prev = rcu_dereference(f->channel);
				if (prev && (prev == chan ||
					     fctx->sync(f, prev, chan) == 0))
					must_wait = false;
				rcu_read_unlock();
				if (!must_wait)
					continue;
			}

			ret = dma_fence_wait(fence, intr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		dma_fence_put(&(*pfence)->base);
	*pfence = NULL;
}

int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, read would not assume the channel context is still alive.
 * This function may be called from another device, running into free memory as a
 * result. The drm node should still be there, so we can derive the index from
 * the fence context.
 */
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(kref_read(&fence->base.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		dma_fence_put(&fence->base);
		return false;
	}

	return true;
}

static void nouveau_fence_release(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	dma_fence_free(&fence->base);
}

static const struct dma_fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

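/*
 * Take a reference on the non-stall notifier before arming signaling, and
 * drop it again if the fence turns out to have signalled already.
 */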
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

static const struct dma_fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.release = nouveau_fence_release
};