// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define JOB_TIMEOUT_MS 500

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
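/*
 * Queue state machine coordinating the timeout handler, the job fault
 * IRQ and the reset worker: ACTIVE -> STOPPED (scheduler stopped on
 * timeout/reset) -> STARTING (scheduler being restarted) -> ACTIVE.
 * A fault raised while a queue is STARTING is latched as FAULT_PENDING
 * and reported by panfrost_scheduler_start() once the queue is running
 * again.
 */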
enum panfrost_queue_status {
	PANFROST_QUEUE_STATUS_ACTIVE,
	PANFROST_QUEUE_STATUS_STOPPED,
	PANFROST_QUEUE_STATUS_STARTING,
	PANFROST_QUEUE_STATUS_FAULT_PENDING,
};
struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;
	atomic_t status;
	struct mutex lock;
	u64 fence_context;
	u64 emit_seqno;
};
struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};
static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}
struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};
static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}
static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}
static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}
static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};
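/*
 * Each job slot gets its own dma_fence context (allocated in
 * panfrost_job_init()), so fences from different slots live on
 * independent timelines with independent seqnos.
 */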
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}
static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs.
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}
static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler only jobs and h/w with
	 * multiple (2) coherent core groups
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
		return;
	}

	cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
		job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}
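/*
 * Implicit synchronisation: before a job runs it waits on the exclusive
 * (write) fence of every BO it uses, and once the job is pushed its
 * render-done fence is installed as the new exclusive fence on those
 * BOs.
 */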
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);
	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}
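/*
 * Final unref: drops every fence and BO reference the job still holds.
 * Reached via panfrost_job_put() once the last reference (submitter's
 * or the scheduler's, see the kref_get() in panfrost_job_push()) goes
 * away.
 */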
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bos[i]);
		kvfree(job->bos);
	}

	kfree(job);
}
void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}
static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}
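/*
 * drm_sched calls ->dependency() repeatedly until it returns NULL,
 * waiting on each returned fence before running the job. Entries are
 * NULLed as they are handed back so every fence is returned exactly
 * once.
 */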
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);
	return fence;
}
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		irq_mask |= MK_JS_MASK(j);
	}

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}
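/*
 * Stop the scheduler of @queue and mark it STOPPED. Returns true if
 * this call did the stopping, false if the queue was already stopped
 * (e.g. by a concurrent timeout or reset). Paired with
 * panfrost_scheduler_start().
 */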
static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
				    struct drm_sched_job *bad)
{
	enum panfrost_queue_status old_status;
	bool stopped = false;

	mutex_lock(&queue->lock);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_STOPPED);
	if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
		goto out;

	WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);
	drm_sched_stop(&queue->sched, bad);
	if (bad)
		drm_sched_increase_karma(bad);

	stopped = true;

	/*
	 * Set the timeout to max so the timer doesn't get started
	 * when we return from the timeout handler (restored in
	 * panfrost_scheduler_start()).
	 */
	queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;

out:
	mutex_unlock(&queue->lock);

	return stopped;
}
static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
{
	enum panfrost_queue_status old_status;

	mutex_lock(&queue->lock);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_STARTING);
	WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED);

	/* Restore the original timeout before starting the scheduler. */
	queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
	drm_sched_resubmit_jobs(&queue->sched);
	drm_sched_start(&queue->sched, true);
	old_status = atomic_xchg(&queue->status,
				 PANFROST_QUEUE_STATUS_ACTIVE);
	if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING)
		drm_sched_fault(&queue->sched);

	mutex_unlock(&queue->lock);
}
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	/* Scheduler is already stopped, nothing to do. */
	if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
		return;

	/* Schedule a reset if there's no reset in progress. */
	if (!atomic_xchg(&pfdev->reset.pending, 1))
		schedule_work(&pfdev->reset.work);
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};
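/*
 * A single "job" interrupt line covers all slots: JOB_INT_STAT carries
 * one "done" and one "failed" bit per slot (MK_JS_MASK(j)), so the
 * handler walks the slots until every set bit has been acknowledged.
 */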
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			enum panfrost_queue_status old_status;

			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			/*
			 * When the queue is being restarted we don't report
			 * faults directly to avoid races between the timeout
			 * and reset handlers. panfrost_scheduler_start() will
			 * call drm_sched_fault() after the queue has been
			 * started if status == FAULT_PENDING.
			 */
			old_status = atomic_cmpxchg(&pfdev->js->queue[j].status,
						    PANFROST_QUEUE_STATUS_STARTING,
						    PANFROST_QUEUE_STATUS_FAULT_PENDING);
			if (old_status == PANFROST_QUEUE_STATUS_ACTIVE)
				drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job;

			spin_lock(&pfdev->js->job_lock);
			job = pfdev->jobs[j];
			/* Only NULL if job timeout occurred */
			if (job) {
				pfdev->jobs[j] = NULL;

				panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
				panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

				dma_fence_signal_locked(job->done_fence);
				pm_runtime_put_autosuspend(pfdev->dev);
			}
			spin_unlock(&pfdev->js->job_lock);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}
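/*
 * Reset worker, scheduled from the timeout handler once reset.pending
 * has been set: it stops every queue, drops the runtime-PM and devfreq
 * accounting held by in-flight jobs, resets the GPU, then restarts the
 * schedulers (which resubmit the unfinished jobs).
 */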
static void panfrost_reset(struct work_struct *work)
{
	struct panfrost_device *pfdev = container_of(work,
						     struct panfrost_device,
						     reset.work);
	unsigned long flags;
	unsigned int i;
	bool cookie;

	cookie = dma_fence_begin_signalling();
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/*
		 * We want pending timeouts to be handled before we attempt
		 * to stop the scheduler. If we don't do that and the timeout
		 * handler is in flight, it might have removed the bad job
		 * from the list, and we'll lose this job if the reset handler
		 * enters the critical section in panfrost_scheduler_stop()
		 * before the timeout handler.
		 *
		 * Timeout is set to MAX_SCHEDULE_TIMEOUT - 1 because we need
		 * something big enough to make sure the timer will not expire
		 * before we manage to stop the scheduler, but we can't use
		 * MAX_SCHEDULE_TIMEOUT because drm_sched_get_cleanup_job()
		 * considers that as 'timer is not running' and will dequeue
		 * the job without making sure the timeout handler is not
		 * running.
		 */
		pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1;
		cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr);
		panfrost_scheduler_stop(&pfdev->js->queue[i], NULL);
	}

	/* All timers have been stopped, we can safely reset the pending state. */
	atomic_set(&pfdev->reset.pending, 0);

	spin_lock_irqsave(&pfdev->js->job_lock, flags);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		if (pfdev->jobs[i]) {
			pm_runtime_put_noidle(pfdev->dev);
			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
			pfdev->jobs[i] = NULL;
		}
	}
	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		panfrost_scheduler_start(&pfdev->js->queue[i]);

	dma_fence_end_signalling(cookie);
}
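/*
 * One drm_gpu_scheduler instance is created per job slot, with a
 * hardware submission limit of 1, matching the single _NEXT register
 * set the driver uses per slot (panfrost_job_hw_submit() WARNs if
 * JS_COMMAND_NEXT is still busy).
 */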
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	INIT_WORK(&pfdev->reset.work, panfrost_reset);

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		mutex_init(&js->queue[j].lock);
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);
	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);
	return ret;
}
void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		drm_sched_fini(&js->queue[j].sched);
		mutex_destroy(&js->queue[j].lock);
	}
}
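/*
 * Per-file state: each DRM file gets one scheduler entity per job slot,
 * created on open and destroyed on close.
 */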
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}