/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

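/* Add an entity to the run queue's entity list; does nothing if the
 * entity is already queued.
 */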
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

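/* Remove an entity from the run queue, clearing rq->current_entity if
 * it points to the removed entity.
 */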
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq          The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Initialize a context entity, used by the scheduler when submitting to a HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_sched_entity
 * @rq          The run queue this entity belongs to
 * @jobs        The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = fence_context_alloc(2);

        return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
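        /* make sure we see the most recent state of the job queue */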
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini, consume existing
         * queued IBs
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

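/* Callback for the entity's dependency fence: the dependency is
 * resolved, so clear it and wake up the scheduler.
 */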
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
        amd_sched_wakeup(entity->sched);
}

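/* Callback which only clears the entity's dependency, used when the
 * dependency is the scheduled fence of the same scheduler.
 */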
static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
}

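/* Try to install a callback on the entity's current dependency fence.
 * Returns true if a callback was installed and the entity must wait,
 * false if the dependency could be resolved immediately.
 */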
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourselves */
                fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {
                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = fence_get(&s_fence->scheduled);
                fence_put(entity->dependency);
                entity->dependency = fence;
                if (!fence_add_callback(fence, &entity->cb,
                                        amd_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                fence_put(fence);
                return false;
        }

        if (!fence_add_callback(entity->dependency, &entity->cb,
                                amd_sched_entity_wakeup))
                return true;

        fence_put(entity->dependency);
        return false;
}

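/* Peek at the next job in the entity's queue; returns NULL if the queue
 * is empty or the job still has an unresolved dependency.
 */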
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job           The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                        sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                amd_sched_rq_add_entity(entity->rq, entity);
                amd_sched_wakeup(sched);
        }
        return added;
}

/* amd_sched_job_finish is called after the hw fence has signaled; it
 * removes the job from the ring_mirror_list, re-arms the timeout handler
 * for the next pending job and frees the finished job.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
                                                   finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;

        /* remove job from ring_mirror_list */
        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                struct amd_sched_job *next;

                spin_unlock(&sched->job_list_lock);
                cancel_delayed_work_sync(&s_job->work_tdr);
                spin_lock(&sched->job_list_lock);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);

                if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
        sched->ops->free_job(s_job);
}

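/* Fence callback for the finished fence: defer the actual cleanup to
 * process context via the job's finish_work.
 */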
static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
                                                 finish_cb);
        schedule_work(&job->finish_work);
}

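/* Track the job in the ring_mirror_list and start the timeout handler
 * if this is the first job in flight.
 */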
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct amd_sched_job, node) == s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        spin_unlock(&sched->job_list_lock);
}

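/* Timeout work: forward the timeout to the driver's timedout_job handler */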
static void amd_sched_job_timedout(struct work_struct *work)
{
        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

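/**
 * Detach all jobs on the ring mirror list from their hw fences
 *
 * @sched       The scheduler whose hw ring is going to be reset
 *
 * Removes the process_job callbacks and drops the parent fence
 * references, so the old hw fences are ignored after a GPU reset.
 */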
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
                        fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                }
        }
        atomic_set(&sched->hw_rq_count, 0);
        spin_unlock(&sched->job_list_lock);
}

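/**
 * Resubmit all jobs on the ring mirror list after a GPU reset
 *
 * @sched       The scheduler to recover
 *
 * Restarts the timeout handler and runs every mirrored job on the hw
 * ring again, reinstalling the hw fence callbacks.
 */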
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job, *tmp;
        int r;

        spin_lock(&sched->job_list_lock);
        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                         struct amd_sched_job, node);
        if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);

        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct amd_sched_fence *s_fence = s_job->s_fence;
                struct fence *fence;

                spin_unlock(&sched->job_list_lock);
                fence = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
                if (fence) {
                        s_fence->parent = fence_get(fence);
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
                spin_lock(&sched->job_list_lock);
        }
        spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job           The pointer to the job to submit
 *
 * Blocks until the job could be pushed to the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
                           amd_sched_job_finish_cb);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/* init a sched_job with its basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner)
{
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;

        INIT_WORK(&job->finish_work, amd_sched_job_finish);
        INIT_LIST_HEAD(&job->node);
        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

        return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

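/* Hw fence callback: the job finished on the hardware; drop the hw ring
 * count, signal the scheduler fence and wake up the worker thread.
 */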
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_finished(s_fence);

        trace_amd_sched_process_job(s_fence);
        fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
}

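/* Park the scheduler thread if requested; returns true if it was parked */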
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

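/* Main scheduler thread: picks ready entities, runs their jobs on the
 * hw ring and installs the hw fence callbacks.
 */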
static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!amd_sched_blocked(sched) &&
                                          (entity = amd_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        s_fence->parent = fence_get(fence);
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched               The pointer to the scheduler
 * @ops                 The backend operations for this scheduler.
 * @hw_submission       Number of hw submissions that can be in flight.
 * @timeout             Timeout in jiffies before a job is considered hung,
 *                      or MAX_SCHEDULE_TIMEOUT to disable the timeout handler.
 * @name                Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
}