// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 */

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
14 #define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15 HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
16 HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
17 HL_CS_FLAGS_ENGINES_COMMAND | HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
20 #define MAX_TS_ITER_NUM 100
/**
 * enum hl_cs_wait_status - cs wait status
 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
 * @CS_WAIT_STATUS_COMPLETED: cs completed
 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
 */
enum hl_cs_wait_status {
	CS_WAIT_STATUS_BUSY,
	CS_WAIT_STATUS_COMPLETED,
	CS_WAIT_STATUS_GONE
};
/*
 * Data used while handling wait/timestamp nodes.
 * The purpose of this struct is to store the needed data for both operations
 * in one variable instead of passing a large number of arguments to functions.
 */
struct wait_interrupt_data {
	struct hl_user_interrupt *interrupt;
	struct hl_mmap_mem_buf *buf;
	struct hl_mem_mgr *mmg;
52 static void job_wq_completion(struct work_struct *work);
53 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
54 enum hl_cs_wait_status *status, s64 *timestamp);
55 static void cs_do_release(struct kref *ref);
static void hl_push_cs_outcome(struct hl_device *hdev,
		struct hl_cs_outcome_store *outcome_store,
		u64 seq, ktime_t ts, int error)
{
	struct hl_cs_outcome *node;
	unsigned long flags;
	/*
	 * CS outcome store supports the following operations:
	 * push outcome - store a recent CS outcome in the store
	 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
	 * It uses 2 lists: used list and free list.
	 * It has a pre-allocated amount of nodes, each node stores
	 * a single CS outcome.
	 * Initially, all the nodes are in the free list.
	 * On push outcome, a node (any) is taken from the free list, its
	 * information is filled in, and the node is moved to the used list.
	 * It is possible that there are no nodes left in the free list.
	 * In this case, we will lose some information about old outcomes. We
	 * will pop the OLDEST node from the used list, and make it free.
	 * On pop, the node is searched for in the used list (using a search
	 * hash).
	 * If found, the node is then removed from the used list, and moved
	 * back to the free list. The outcome data that the node contained is
	 * returned back to the user.
	 */
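	/* For example (illustration only): with a store that has two free
	 * nodes, pushing outcomes for seqs 7, 8 and 9 keeps only 8 and 9;
	 * the node holding seq 7 (the oldest) is recycled and its outcome is
	 * reported as lost, so a later pop of seq 7 will not find it.
	 */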
	spin_lock_irqsave(&outcome_store->db_lock, flags);

	if (list_empty(&outcome_store->free_list)) {
		node = list_last_entry(&outcome_store->used_list,
				struct hl_cs_outcome, list_link);
		hash_del(&node->map_link);
		dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
	} else {
		node = list_last_entry(&outcome_store->free_list,
				struct hl_cs_outcome, list_link);
	}

	list_del_init(&node->list_link);
	node->seq = seq;
	node->ts = ts;
	node->error = error;

	list_add(&node->list_link, &outcome_store->used_list);
	hash_add(outcome_store->outcome_map, &node->map_link, node->seq);

	spin_unlock_irqrestore(&outcome_store->db_lock, flags);
}
static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
		u64 seq, ktime_t *ts, int *error)
{
	struct hl_cs_outcome *node;
	unsigned long flags;

	spin_lock_irqsave(&outcome_store->db_lock, flags);

	hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
		if (node->seq == seq) {
			*ts = node->ts;
			*error = node->error;
			hash_del(&node->map_link);
			list_del_init(&node->list_link);
			list_add(&node->list_link, &outcome_store->free_list);
			spin_unlock_irqrestore(&outcome_store->db_lock, flags);
			return true;
		}

	spin_unlock_irqrestore(&outcome_store->db_lock, flags);
	return false;
}
static void hl_sob_reset(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
	hdev->asic_funcs->reset_sob(hdev, hw_sob);
	hw_sob->need_reset = false;
}
void hl_sob_reset_error(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_crit(hdev->dev,
		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
		hw_sob->q_idx, hw_sob->sob_id);
}
159 void hw_sob_put(struct hl_hw_sob *hw_sob)
162 kref_put(&hw_sob->kref, hl_sob_reset);
165 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
168 kref_put(&hw_sob->kref, hl_sob_reset_error);
171 void hw_sob_get(struct hl_hw_sob *hw_sob)
174 kref_get(&hw_sob->kref);
/**
 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
 * @sob_base: sob base id
 * @sob_mask: sob user mask, each bit represents a sob offset from sob base
 * @mask: generated mask
 *
 * Return: 0 if given parameters are valid
 */
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
{
	int i;

	if (sob_mask == 0x1) {
		*mask = ~(1 << (sob_base & 0x7));
	} else {
		/* find msb in order to verify sob range is valid */
		for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
			if (BIT(i) & sob_mask)
				break;

		if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
			return -EINVAL;
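	/* Example (illustration): for sob_base = 10 and sob_mask = 0x1, the
	 * generated mask is ~(1 << (10 & 0x7)) = ~(1 << 2) = 0xfb. For a
	 * multi-bit sob_mask, the msb offset plus (sob_base & 0x7) must stay
	 * below HL_MAX_SOBS_PER_MONITOR, otherwise the range is invalid.
	 */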
209 static void hl_fence_release(struct kref *kref)
211 struct hl_fence *fence =
212 container_of(kref, struct hl_fence, refcount);
213 struct hl_cs_compl *hl_cs_cmpl =
214 container_of(fence, struct hl_cs_compl, base_fence);
219 void hl_fence_put(struct hl_fence *fence)
221 if (IS_ERR_OR_NULL(fence))
223 kref_put(&fence->refcount, hl_fence_release);
226 void hl_fences_put(struct hl_fence **fence, int len)
230 for (i = 0; i < len; i++, fence++)
231 hl_fence_put(*fence);
234 void hl_fence_get(struct hl_fence *fence)
237 kref_get(&fence->refcount);
240 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
242 kref_init(&fence->refcount);
243 fence->cs_sequence = sequence;
245 fence->timestamp = ktime_set(0, 0);
246 fence->mcs_handling_done = false;
247 init_completion(&fence->completion);
250 void cs_get(struct hl_cs *cs)
252 kref_get(&cs->refcount);
255 static int cs_get_unless_zero(struct hl_cs *cs)
257 return kref_get_unless_zero(&cs->refcount);
260 static void cs_put(struct hl_cs *cs)
262 kref_put(&cs->refcount, cs_do_release);
265 static void cs_job_do_release(struct kref *ref)
267 struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
272 static void hl_cs_job_put(struct hl_cs_job *job)
274 kref_put(&job->refcount, cs_job_do_release);
bool cs_needs_completion(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the last CS in sequence should
	 * get a completion; any non-staged CS will always get a completion.
	 */
	return !(cs->staged_cs && !cs->staged_last);
}

bool cs_needs_timeout(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the first CS in sequence should
	 * get a timeout; any non-staged CS will always get a timeout.
	 */
	return !(cs->staged_cs && !cs->staged_first);
}
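/* Example (illustration): in a staged submission of three CSs, only the CS
 * marked 'staged_first' is armed with a TDR timeout and only the CS marked
 * 'staged_last' signals a completion; the middle CS gets neither.
 */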
299 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
301 /* Patched CB is created for external queues jobs */
302 return (job->queue_type == QUEUE_TYPE_EXT);
/**
 * cs_parser - parse the user command submission
 *
 * @hpriv : pointer to the private data of the fd
 * @job : pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers.
 */
316 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
318 struct hl_device *hdev = hpriv->hdev;
319 struct hl_cs_parser parser;
322 parser.ctx_id = job->cs->ctx->asid;
323 parser.cs_sequence = job->cs->sequence;
324 parser.job_id = job->id;
326 parser.hw_queue_id = job->hw_queue_id;
327 parser.job_userptr_list = &job->userptr_list;
328 parser.patched_cb = NULL;
329 parser.user_cb = job->user_cb;
330 parser.user_cb_size = job->user_cb_size;
331 parser.queue_type = job->queue_type;
332 parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
333 job->patched_cb = NULL;
334 parser.completion = cs_needs_completion(job->cs);
336 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
338 if (is_cb_patched(hdev, job)) {
340 job->patched_cb = parser.patched_cb;
341 job->job_cb_size = parser.patched_cb_size;
342 job->contains_dma_pkt = parser.contains_dma_pkt;
343 atomic_inc(&job->patched_cb->cs_cnt);
347 * Whether the parsing worked or not, we don't need the
348 * original CB anymore because it was already parsed and
349 * won't be accessed again for this CS
351 atomic_dec(&job->user_cb->cs_cnt);
352 hl_cb_put(job->user_cb);
355 job->job_cb_size = job->user_cb_size;
361 static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
363 struct hl_cs *cs = job->cs;
365 if (is_cb_patched(hdev, job)) {
366 hl_userptr_delete_list(hdev, &job->userptr_list);
369 * We might arrive here from rollback and patched CB wasn't
370 * created, so we need to check it's not NULL
372 if (job->patched_cb) {
373 atomic_dec(&job->patched_cb->cs_cnt);
374 hl_cb_put(job->patched_cb);
378 /* For H/W queue jobs, if a user CB was allocated by driver,
379 * the user CB isn't released in cs_parser() and thus should be
380 * released here. This is also true for INT queues jobs which were
381 * allocated by driver.
383 if (job->is_kernel_allocated_cb &&
384 (job->queue_type == QUEUE_TYPE_HW || job->queue_type == QUEUE_TYPE_INT)) {
385 atomic_dec(&job->user_cb->cs_cnt);
386 hl_cb_put(job->user_cb);
390 * This is the only place where there can be multiple threads
391 * modifying the list at the same time
393 spin_lock(&cs->job_lock);
394 list_del(&job->cs_node);
395 spin_unlock(&cs->job_lock);
397 hl_debugfs_remove_job(hdev, job);
399 /* We decrement reference only for a CS that gets completion
400 * because the reference was incremented only for this kind of CS
401 * right before it was scheduled.
403 * In staged submission, only the last CS marked as 'staged_last'
404 * gets completion, hence its release function will be called from here.
	 * As for all the rest of the CS's in the staged submission which do
	 * not get completion, their CS reference will be decremented by the
	 * 'staged_last' CS during the CS release flow.
	 * All relevant PQ CI counters will be incremented during the CS release
	 * flow by calling 'hl_hw_queue_update_ci'.
	 */
411 if (cs_needs_completion(cs) &&
412 (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
414 /* In CS based completions, the timestamp is already available,
415 * so no need to extract it from job
417 if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
418 cs->completion_timestamp = job->timestamp;
427 * hl_staged_cs_find_first - locate the first CS in this staged submission
429 * @hdev: pointer to device structure
430 * @cs_seq: staged submission sequence number
432 * @note: This function must be called under 'hdev->cs_mirror_lock'
434 * Find and return a CS pointer with the given sequence
436 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
440 list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
441 if (cs->staged_cs && cs->staged_first &&
442 cs->sequence == cs_seq)
449 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
451 * @hdev: pointer to device structure
452 * @cs: staged submission member
455 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
457 struct hl_cs *last_entry;
459 last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
462 if (last_entry->staged_last)
/*
 * staged_cs_get - get CS reference if this CS is a part of a staged CS
 *
 * @hdev: pointer to device structure
 * @cs_seq: staged submission sequence number
 *
 * Increment CS reference for every CS in this staged submission except for
 * the CS which gets completion.
 */
static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
{
	/* Only the last CS in this staged submission will get a completion.
	 * We must increment the reference for all other CS's in this staged
	 * submission.
	 * Once we get a completion we will release the whole staged submission.
	 */
	if (!cs->staged_last)
		kref_get(&cs->refcount);
}
/*
 * staged_cs_put - put a CS in case it is part of staged submission
 *
 * @hdev: pointer to device structure
 *
 * This function decrements a CS reference (for a non completion CS)
 */
static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
	/* We release all CS's in a staged submission except the last
	 * CS, whose reference we have never incremented.
	 */
	if (!cs_needs_completion(cs))
		cs_put(cs);
}
506 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
508 struct hl_cs *next = NULL, *iter, *first_cs;
510 if (!cs_needs_timeout(cs))
513 spin_lock(&hdev->cs_mirror_lock);
	/* We need to handle TDR only once for the complete staged submission.
	 * Hence, we choose the CS that reaches this function first, which is
	 * the CS marked as 'staged_last'.
	 * In case a single staged CS was submitted which has both first and
	 * last indications, then "cs_find_first" below will return NULL, since
	 * we removed the CS node from the list before getting here.
	 * In such a case, just continue with the CS to cancel its TDR work.
	 */
523 if (cs->staged_cs && cs->staged_last) {
524 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
529 spin_unlock(&hdev->cs_mirror_lock);
	/* Don't cancel TDR in case this CS has timed out, because we might be
	 * running from the TDR context.
	 */
	if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
		return;
538 cancel_delayed_work_sync(&cs->work_tdr);
540 spin_lock(&hdev->cs_mirror_lock);
542 /* queue TDR for next CS */
543 list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
544 if (cs_needs_timeout(iter)) {
549 if (next && !next->tdr_active) {
550 next->tdr_active = true;
551 schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
554 spin_unlock(&hdev->cs_mirror_lock);
558 * force_complete_multi_cs - complete all contexts that wait on multi-CS
560 * @hdev: pointer to habanalabs device structure
562 static void force_complete_multi_cs(struct hl_device *hdev)
566 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
567 struct multi_cs_completion *mcs_compl;
569 mcs_compl = &hdev->multi_cs_completion[i];
571 spin_lock(&mcs_compl->lock);
573 if (!mcs_compl->used) {
574 spin_unlock(&mcs_compl->lock);
		/* when calling force complete no context should be waiting on
		 * the multi-CS.
		 * We are calling the function as a protection for such case,
		 * to free any pending context and print an error message.
		 */
		dev_err(hdev->dev,
			"multi-CS completion context %d still waiting when calling force completion\n",
			i);
		complete_all(&mcs_compl->completion);
		spin_unlock(&mcs_compl->lock);
/*
 * complete_multi_cs - complete all waiting entities on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 * @cs: CS structure
 *
 * The function signals a waiting entity that has overlapping stream masters
 * with the completed CS.
 * For example:
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
 *   common stream master QID
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 4. send signal as stream
 *   master QID 4 is common
 */
606 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
608 struct hl_fence *fence = cs->fence;
611 /* in case of multi CS check for completion only for the first CS */
612 if (cs->staged_cs && !cs->staged_first)
615 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
616 struct multi_cs_completion *mcs_compl;
618 mcs_compl = &hdev->multi_cs_completion[i];
619 if (!mcs_compl->used)
622 spin_lock(&mcs_compl->lock);
626 * 1. still waiting for completion
627 * 2. the completed CS has at least one overlapping stream
628 * master with the stream masters in the completion
630 if (mcs_compl->used &&
631 (fence->stream_master_qid_map &
632 mcs_compl->stream_master_qid_map)) {
633 /* extract the timestamp only of first completed CS */
634 if (!mcs_compl->timestamp)
635 mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
637 complete_all(&mcs_compl->completion);
			/*
			 * Setting mcs_handling_done inside the lock ensures
			 * at least one fence has mcs_handling_done set to
			 * true before the wait for mcs finishes. This ensures
			 * at least one CS will be set as completed when
			 * polling the fences.
			 */
			fence->mcs_handling_done = true;
649 spin_unlock(&mcs_compl->lock);
651 /* In case CS completed without mcs completion initialized */
652 fence->mcs_handling_done = true;
655 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
657 struct hl_cs_compl *hl_cs_cmpl)
	/* Skip this handler if the cs wasn't submitted, to avoid putting
	 * the hw_sob twice, since this case is already handled at this point.
	 * Also skip if the hw_sob pointer wasn't set.
	 */
663 if (!hl_cs_cmpl->hw_sob || !cs->submitted)
666 spin_lock(&hl_cs_cmpl->lock);
669 * we get refcount upon reservation of signals or signal/wait cs for the
670 * hw_sob object, and need to put it when the first staged cs
671 * (which contains the encaps signals) or cs signal/wait is completed.
673 if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
674 (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
675 (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
676 (!!hl_cs_cmpl->encaps_signals)) {
678 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
681 hl_cs_cmpl->hw_sob->sob_id,
682 hl_cs_cmpl->sob_val);
684 hw_sob_put(hl_cs_cmpl->hw_sob);
686 if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
687 hdev->asic_funcs->reset_sob_group(hdev,
688 hl_cs_cmpl->sob_group);
691 spin_unlock(&hl_cs_cmpl->lock);
694 static void cs_do_release(struct kref *ref)
696 struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
697 struct hl_device *hdev = cs->ctx->hdev;
698 struct hl_cs_job *job, *tmp;
699 struct hl_cs_compl *hl_cs_cmpl =
700 container_of(cs->fence, struct hl_cs_compl, base_fence);
702 cs->completed = true;
705 * Although if we reached here it means that all external jobs have
706 * finished, because each one of them took refcnt to CS, we still
707 * need to go over the internal jobs and complete them. Otherwise, we
708 * will have leaked memory and what's worse, the CS object (and
709 * potentially the CTX object) could be released, while the JOB
710 * still holds a pointer to them (but no reference).
712 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
713 hl_complete_job(hdev, job);
715 if (!cs->submitted) {
717 * In case the wait for signal CS was submitted, the fence put
718 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
719 * right before hanging on the PQ.
721 if (cs->type == CS_TYPE_WAIT ||
722 cs->type == CS_TYPE_COLLECTIVE_WAIT)
723 hl_fence_put(cs->signal_fence);
	/* Need to update CI for all queue jobs that do not get completion */
729 hl_hw_queue_update_ci(cs);
731 /* remove CS from CS mirror list */
732 spin_lock(&hdev->cs_mirror_lock);
733 list_del_init(&cs->mirror_node);
734 spin_unlock(&hdev->cs_mirror_lock);
736 cs_handle_tdr(hdev, cs);
	/* The completion CS decrements the reference for the entire
	 * staged submission.
	 */
	if (cs->staged_last) {
743 struct hl_cs *staged_cs, *tmp_cs;
745 list_for_each_entry_safe(staged_cs, tmp_cs,
746 &cs->staged_cs_node, staged_cs_node)
747 staged_cs_put(hdev, staged_cs);
		/* A staged CS will be a member in the list only after it
		 * was submitted. We used 'cs_mirror_lock' when inserting
		 * it to the list, so we will use it again when removing it.
		 */
755 spin_lock(&hdev->cs_mirror_lock);
756 list_del(&cs->staged_cs_node);
757 spin_unlock(&hdev->cs_mirror_lock);
760 /* decrement refcount to handle when first staged cs
761 * with encaps signals is completed.
763 if (hl_cs_cmpl->encaps_signals)
764 kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
765 hl_encaps_release_handle_and_put_ctx);
768 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
769 kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
772 /* Must be called before hl_ctx_put because inside we use ctx to get
775 hl_debugfs_remove_cs(cs);
777 hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
	/* We need to mark an error for a CS that was not submitted, because in
	 * that case the hl fence release flow is different. Mainly, we don't
	 * need to handle hw_sob for signal/wait.
	 */
	if (cs->timedout)
		cs->fence->error = -ETIMEDOUT;
785 else if (cs->aborted)
786 cs->fence->error = -EIO;
787 else if (!cs->submitted)
788 cs->fence->error = -EBUSY;
790 if (unlikely(cs->skip_reset_on_timeout)) {
792 "Command submission %llu completed after %llu (s)\n",
794 div_u64(jiffies - cs->submission_time_jiffies, HZ));
798 cs->fence->timestamp = cs->completion_timestamp;
799 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
800 cs->fence->timestamp, cs->fence->error);
805 complete_all(&cs->fence->completion);
806 complete_multi_cs(hdev, cs);
808 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
810 hl_fence_put(cs->fence);
812 kfree(cs->jobs_in_queue_cnt);
816 static void cs_timedout(struct work_struct *work)
818 struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work);
819 bool skip_reset_on_timeout, device_reset = false;
820 struct hl_device *hdev;
821 u64 event_mask = 0x0;
825 skip_reset_on_timeout = cs->skip_reset_on_timeout;
827 rc = cs_get_unless_zero(cs);
831 if ((!cs->submitted) || (cs->completed)) {
836 hdev = cs->ctx->hdev;
838 if (likely(!skip_reset_on_timeout)) {
839 if (hdev->reset_on_lockup)
842 hdev->reset_info.needs_reset = true;
		/* Mark the CS as timed out so we won't try to cancel its TDR */
		cs->timedout = true;
848 /* Save only the first CS timeout parameters */
849 rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
851 hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
852 hdev->captured_err_info.cs_timeout.seq = cs->sequence;
853 event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
856 timeout_sec = jiffies_to_msecs(hdev->timeout_jiffies) / 1000;
861 "Signal command submission %llu has not finished in %u seconds!\n",
862 cs->sequence, timeout_sec);
867 "Wait command submission %llu has not finished in %u seconds!\n",
868 cs->sequence, timeout_sec);
871 case CS_TYPE_COLLECTIVE_WAIT:
873 "Collective Wait command submission %llu has not finished in %u seconds!\n",
874 cs->sequence, timeout_sec);
879 "Command submission %llu has not finished in %u seconds!\n",
880 cs->sequence, timeout_sec);
884 rc = hl_state_dump(hdev);
886 dev_err(hdev->dev, "Error during system state dump %d\n", rc);
891 event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
892 hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask);
893 } else if (event_mask) {
894 hl_notifier_event_send_all(hdev, event_mask);
898 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
899 enum hl_cs_type cs_type, u64 user_sequence,
900 struct hl_cs **cs_new, u32 flags, u32 timeout)
902 struct hl_cs_counters_atomic *cntr;
903 struct hl_fence *other = NULL;
904 struct hl_cs_compl *cs_cmpl;
908 cntr = &hdev->aggregated_cs_counters;
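	/* Note: the allocations below first try GFP_ATOMIC (which does not
	 * sleep) and only fall back to GFP_KERNEL when that fails; the same
	 * pattern repeats for cs_cmpl and jobs_in_queue_cnt further down.
	 */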
	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
915 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
916 atomic64_inc(&cntr->out_of_mem_drop_cnt);
920 /* increment refcnt for context */
924 cs->submitted = false;
925 cs->completed = false;
927 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
928 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
929 cs->timeout_jiffies = timeout;
930 cs->skip_reset_on_timeout =
931 hdev->reset_info.skip_reset_on_timeout ||
932 !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
933 cs->submission_time_jiffies = jiffies;
934 INIT_LIST_HEAD(&cs->job_list);
935 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
936 kref_init(&cs->refcount);
937 spin_lock_init(&cs->job_lock);
	cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
	if (!cs_cmpl)
		cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
944 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
945 atomic64_inc(&cntr->out_of_mem_drop_cnt);
950 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
951 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
952 if (!cs->jobs_in_queue_cnt)
953 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
954 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
956 if (!cs->jobs_in_queue_cnt) {
957 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
958 atomic64_inc(&cntr->out_of_mem_drop_cnt);
963 cs_cmpl->hdev = hdev;
964 cs_cmpl->type = cs->type;
965 spin_lock_init(&cs_cmpl->lock);
966 cs->fence = &cs_cmpl->base_fence;
968 spin_lock(&ctx->cs_lock);
970 cs_cmpl->cs_seq = ctx->cs_sequence;
971 other = ctx->cs_pending[cs_cmpl->cs_seq &
972 (hdev->asic_prop.max_pending_cs - 1)];
974 if (other && !completion_done(&other->completion)) {
975 /* If the following statement is true, it means we have reached
976 * a point in which only part of the staged submission was
977 * submitted and we don't have enough room in the 'cs_pending'
978 * array for the rest of the submission.
979 * This causes a deadlock because this CS will never be
980 * completed as it depends on future CS's for completion.
982 if (other->cs_sequence == user_sequence)
983 dev_crit_ratelimited(hdev->dev,
984 "Staged CS %llu deadlock due to lack of resources",
987 dev_dbg_ratelimited(hdev->dev,
988 "Rejecting CS because of too many in-flights CS\n");
989 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
990 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
996 hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
998 cs->sequence = cs_cmpl->cs_seq;
1000 ctx->cs_pending[cs_cmpl->cs_seq &
1001 (hdev->asic_prop.max_pending_cs - 1)] =
1002 &cs_cmpl->base_fence;
1005 hl_fence_get(&cs_cmpl->base_fence);
1007 hl_fence_put(other);
1009 spin_unlock(&ctx->cs_lock);
1016 spin_unlock(&ctx->cs_lock);
1017 kfree(cs->jobs_in_queue_cnt);
1026 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
1028 struct hl_cs_job *job, *tmp;
1030 staged_cs_put(hdev, cs);
1032 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1033 hl_complete_job(hdev, job);
1037 * release_reserved_encaps_signals() - release reserved encapsulated signals.
1038 * @hdev: pointer to habanalabs device structure
 * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
 * encapsulated signals wasn't submitted and thus they weren't released as part of CS roll-back.
 * For these signals we also need to put the refcount of the H/W SOB which was taken at the
 * time of reservation.
 */
1045 static void release_reserved_encaps_signals(struct hl_device *hdev)
1047 struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
1048 struct hl_cs_encaps_sig_handle *handle;
1049 struct hl_encaps_signals_mgr *mgr;
1055 mgr = &ctx->sig_mgr;
1057 idr_for_each_entry(&mgr->handles, handle, id)
1058 if (handle->cs_seq == ULLONG_MAX)
1059 kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
1064 void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
1067 struct hl_cs *cs, *tmp;
1069 if (!skip_wq_flush) {
1070 flush_workqueue(hdev->ts_free_obj_wq);
1072 /* flush all completions before iterating over the CS mirror list in
1073 * order to avoid a race with the release functions
1075 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1076 flush_workqueue(hdev->cq_wq[i]);
1078 flush_workqueue(hdev->cs_cmplt_wq);
1081 /* Make sure we don't have leftovers in the CS mirror list */
1082 list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
1085 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
1086 cs->ctx->asid, cs->sequence);
1087 cs_rollback(hdev, cs);
1091 force_complete_multi_cs(hdev);
1093 release_reserved_encaps_signals(hdev);
1097 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
1099 struct hl_user_pending_interrupt *pend, *temp;
1100 unsigned long flags;
1102 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
1103 list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, list_node) {
1104 pend->fence.error = -EIO;
1105 complete_all(&pend->fence.completion);
1107 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
1109 spin_lock_irqsave(&interrupt->ts_list_lock, flags);
1110 list_for_each_entry_safe(pend, temp, &interrupt->ts_list_head, list_node) {
1111 list_del(&pend->list_node);
1112 hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
1113 hl_cb_put(pend->ts_reg_info.cq_cb);
1115 spin_unlock_irqrestore(&interrupt->ts_list_lock, flags);
1118 void hl_release_pending_user_interrupts(struct hl_device *hdev)
1120 struct asic_fixed_properties *prop = &hdev->asic_prop;
1121 struct hl_user_interrupt *interrupt;
1124 if (!prop->user_interrupt_count)
	/* We iterate through the user interrupt requests and wake up all
	 * user threads waiting for interrupt completion. We iterate the
	 * list under a lock; this is why all user threads, once awake,
	 * will wait on the same lock and will release the waiting object upon
	 * unlock.
	 */
1134 for (i = 0 ; i < prop->user_interrupt_count ; i++) {
1135 interrupt = &hdev->user_interrupt[i];
1136 wake_pending_user_interrupt_threads(interrupt);
1139 interrupt = &hdev->common_user_cq_interrupt;
1140 wake_pending_user_interrupt_threads(interrupt);
1142 interrupt = &hdev->common_decoder_interrupt;
1143 wake_pending_user_interrupt_threads(interrupt);
1146 static void force_complete_cs(struct hl_device *hdev)
1150 spin_lock(&hdev->cs_mirror_lock);
1152 list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
1153 cs->fence->error = -EIO;
1154 complete_all(&cs->fence->completion);
1157 spin_unlock(&hdev->cs_mirror_lock);
1160 void hl_abort_waiting_for_cs_completions(struct hl_device *hdev)
1162 force_complete_cs(hdev);
1163 force_complete_multi_cs(hdev);
1166 static void job_wq_completion(struct work_struct *work)
1168 struct hl_cs_job *job = container_of(work, struct hl_cs_job,
1170 struct hl_cs *cs = job->cs;
1171 struct hl_device *hdev = cs->ctx->hdev;
1173 /* job is no longer needed */
1174 hl_complete_job(hdev, job);
1177 static void cs_completion(struct work_struct *work)
1179 struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
1180 struct hl_device *hdev = cs->ctx->hdev;
1181 struct hl_cs_job *job, *tmp;
1183 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
1184 hl_complete_job(hdev, job);
1187 u32 hl_get_active_cs_num(struct hl_device *hdev)
1189 u32 active_cs_num = 0;
1192 spin_lock(&hdev->cs_mirror_lock);
1194 list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
1198 spin_unlock(&hdev->cs_mirror_lock);
1200 return active_cs_num;
1203 static int validate_queue_index(struct hl_device *hdev,
1204 struct hl_cs_chunk *chunk,
1205 enum hl_queue_type *queue_type,
1206 bool *is_kernel_allocated_cb)
1208 struct asic_fixed_properties *asic = &hdev->asic_prop;
1209 struct hw_queue_properties *hw_queue_prop;
1211 /* This must be checked here to prevent out-of-bounds access to
1212 * hw_queues_props array
1214 if (chunk->queue_index >= asic->max_queues) {
1215 dev_err(hdev->dev, "Queue index %d is invalid\n",
1216 chunk->queue_index);
1220 hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
1222 if (hw_queue_prop->type == QUEUE_TYPE_NA) {
1223 dev_err(hdev->dev, "Queue index %d is not applicable\n",
1224 chunk->queue_index);
1228 if (hw_queue_prop->binned) {
1229 dev_err(hdev->dev, "Queue index %d is binned out\n",
1230 chunk->queue_index);
1234 if (hw_queue_prop->driver_only) {
1236 "Queue index %d is restricted for the kernel driver\n",
1237 chunk->queue_index);
	/* When hw queue type isn't QUEUE_TYPE_HW,
	 * USER_ALLOC_CB flag shall be treated as "don't care".
	 */
1244 if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1245 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1246 if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1248 "Queue index %d doesn't support user CB\n",
1249 chunk->queue_index);
1253 *is_kernel_allocated_cb = false;
1255 if (!(hw_queue_prop->cb_alloc_flags &
1258 "Queue index %d doesn't support kernel CB\n",
1259 chunk->queue_index);
1263 *is_kernel_allocated_cb = true;
1266 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1270 *queue_type = hw_queue_prop->type;
1274 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1275 struct hl_mem_mgr *mmg,
1276 struct hl_cs_chunk *chunk)
1280 cb = hl_cb_get(mmg, chunk->cb_handle);
1282 dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
1286 if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1287 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1291 atomic_inc(&cb->cs_cnt);
1300 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1301 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1303 struct hl_cs_job *job;
1305 job = kzalloc(sizeof(*job), GFP_ATOMIC);
1307 job = kzalloc(sizeof(*job), GFP_KERNEL);
1312 kref_init(&job->refcount);
1313 job->queue_type = queue_type;
1314 job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1316 if (is_cb_patched(hdev, job))
1317 INIT_LIST_HEAD(&job->userptr_list);
1319 if (job->queue_type == QUEUE_TYPE_EXT)
1320 INIT_WORK(&job->finish_work, job_wq_completion);
1325 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1327 if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1328 return CS_TYPE_SIGNAL;
1329 else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1330 return CS_TYPE_WAIT;
1331 else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1332 return CS_TYPE_COLLECTIVE_WAIT;
1333 else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1334 return CS_RESERVE_SIGNALS;
1335 else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1336 return CS_UNRESERVE_SIGNALS;
1337 else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
1338 return CS_TYPE_ENGINE_CORE;
1339 else if (cs_type_flags & HL_CS_FLAGS_ENGINES_COMMAND)
1340 return CS_TYPE_ENGINES;
1341 else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
1342 return CS_TYPE_FLUSH_PCI_HBW_WRITES;
1344 return CS_TYPE_DEFAULT;
1347 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1349 struct hl_device *hdev = hpriv->hdev;
1350 struct hl_ctx *ctx = hpriv->ctx;
1351 u32 cs_type_flags, num_chunks;
1352 enum hl_device_status status;
1353 enum hl_cs_type cs_type;
1354 bool is_sync_stream;
1357 for (i = 0 ; i < sizeof(args->in.pad) ; i++)
1358 if (args->in.pad[i]) {
1359 dev_dbg(hdev->dev, "Padding bytes must be 0\n");
1363 if (!hl_device_operational(hdev, &status)) {
1367 if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1368 !hdev->supports_staged_submission) {
1369 dev_err(hdev->dev, "staged submission not supported");
1373 cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1375 if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1377 "CS type flags are mutually exclusive, context %d\n",
1382 cs_type = hl_cs_get_cs_type(cs_type_flags);
1383 num_chunks = args->in.num_chunks_execute;
1385 is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
1386 cs_type == CS_TYPE_COLLECTIVE_WAIT);
1388 if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
1389 dev_err(hdev->dev, "Sync stream CS is not supported\n");
1393 if (cs_type == CS_TYPE_DEFAULT) {
1395 dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
1398 } else if (is_sync_stream && num_chunks != 1) {
1400 "Sync stream CS mandates one chunk only, context %d\n",
1408 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1409 struct hl_cs_chunk **cs_chunk_array,
1410 void __user *chunks, u32 num_chunks,
1415 if (num_chunks > HL_MAX_JOBS_PER_CS) {
1416 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1417 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1419 "Number of chunks can NOT be larger than %d\n",
1420 HL_MAX_JOBS_PER_CS);
1424 *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1426 if (!*cs_chunk_array)
1427 *cs_chunk_array = kmalloc_array(num_chunks,
1428 sizeof(**cs_chunk_array), GFP_KERNEL);
1429 if (!*cs_chunk_array) {
1430 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1431 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1435 size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1436 if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1437 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1438 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1439 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1440 kfree(*cs_chunk_array);
1447 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1448 u64 sequence, u32 flags,
1449 u32 encaps_signal_handle)
1451 if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1454 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1455 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1457 if (cs->staged_first) {
1458 /* Staged CS sequence is the first CS sequence */
1459 INIT_LIST_HEAD(&cs->staged_cs_node);
1460 cs->staged_sequence = cs->sequence;
1462 if (cs->encaps_signals)
1463 cs->encaps_sig_hdl_id = encaps_signal_handle;
1465 /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1466 * under the cs_mirror_lock
1468 cs->staged_sequence = sequence;
1471 /* Increment CS reference if needed */
1472 staged_cs_get(hdev, cs);
1474 cs->staged_cs = true;
1479 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1483 for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1484 if (qid == hdev->stream_master_qid_arr[i])
1490 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1491 u32 num_chunks, u64 *cs_seq, u32 flags,
1492 u32 encaps_signals_handle, u32 timeout,
1493 u16 *signal_initial_sob_count)
1495 bool staged_mid, int_queues_only = true, using_hw_queues = false;
1496 struct hl_device *hdev = hpriv->hdev;
1497 struct hl_cs_chunk *cs_chunk_array;
1498 struct hl_cs_counters_atomic *cntr;
1499 struct hl_ctx *ctx = hpriv->ctx;
1500 struct hl_cs_job *job;
1504 u8 stream_master_qid_map = 0;
1507 cntr = &hdev->aggregated_cs_counters;
1508 user_sequence = *cs_seq;
1509 *cs_seq = ULLONG_MAX;
1511 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1516 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1517 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1522 rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1523 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1526 goto free_cs_chunk_array;
1528 *cs_seq = cs->sequence;
1530 hl_debugfs_add_cs(cs);
1532 rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1533 encaps_signals_handle);
1535 goto free_cs_object;
1537 /* If this is a staged submission we must return the staged sequence
1538 * rather than the internal CS sequence
1541 *cs_seq = cs->staged_sequence;
1543 /* Validate ALL the CS chunks before submitting the CS */
1544 for (i = 0 ; i < num_chunks ; i++) {
1545 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1546 enum hl_queue_type queue_type;
1547 bool is_kernel_allocated_cb;
1549 rc = validate_queue_index(hdev, chunk, &queue_type,
1550 &is_kernel_allocated_cb);
1552 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1553 atomic64_inc(&cntr->validation_drop_cnt);
1554 goto free_cs_object;
1557 if (is_kernel_allocated_cb) {
1558 cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
1561 &ctx->cs_counters.validation_drop_cnt);
1562 atomic64_inc(&cntr->validation_drop_cnt);
1564 goto free_cs_object;
1567 cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1570 if (queue_type == QUEUE_TYPE_EXT ||
1571 queue_type == QUEUE_TYPE_HW) {
1572 int_queues_only = false;
1575 * store which stream are being used for external/HW
1578 if (hdev->supports_wait_for_multi_cs)
1579 stream_master_qid_map |=
1580 get_stream_master_qid_mask(hdev,
1581 chunk->queue_index);
1584 if (queue_type == QUEUE_TYPE_HW)
1585 using_hw_queues = true;
1587 job = hl_cs_allocate_job(hdev, queue_type,
1588 is_kernel_allocated_cb);
1590 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1591 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1592 dev_err(hdev->dev, "Failed to allocate a new job\n");
1594 if (is_kernel_allocated_cb)
1597 goto free_cs_object;
1603 job->user_cb_size = chunk->cb_size;
1604 job->hw_queue_id = chunk->queue_index;
1606 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1609 list_add_tail(&job->cs_node, &cs->job_list);
		/*
		 * Increment CS reference. When CS reference is 0, CS is
		 * done and can be signaled to user and all its resources
		 * freed. Only increment for JOBs on external or H/W queues,
		 * because only for those JOBs we get completion.
		 */
1617 if (cs_needs_completion(cs) &&
1618 (job->queue_type == QUEUE_TYPE_EXT ||
1619 job->queue_type == QUEUE_TYPE_HW))
1622 hl_debugfs_add_job(hdev, job);
1624 rc = cs_parser(hpriv, job);
1626 atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1627 atomic64_inc(&cntr->parsing_drop_cnt);
1629 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1630 cs->ctx->asid, cs->sequence, job->id, rc);
1631 goto free_cs_object;
1635 /* We allow a CS with any queue type combination as long as it does
1636 * not get a completion
1638 if (int_queues_only && cs_needs_completion(cs)) {
1639 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1640 atomic64_inc(&cntr->validation_drop_cnt);
1642 "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1643 cs->ctx->asid, cs->sequence);
1645 goto free_cs_object;
1648 if (using_hw_queues)
1649 INIT_WORK(&cs->finish_work, cs_completion);
1652 * store the (external/HW queues) streams used by the CS in the
1653 * fence object for multi-CS completion
1655 if (hdev->supports_wait_for_multi_cs)
1656 cs->fence->stream_master_qid_map = stream_master_qid_map;
1658 rc = hl_hw_queue_schedule_cs(cs);
1662 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1663 cs->ctx->asid, cs->sequence, rc);
1664 goto free_cs_object;
1667 *signal_initial_sob_count = cs->initial_sob_count;
1669 rc = HL_CS_STATUS_SUCCESS;
1673 atomic_dec(&cb->cs_cnt);
1676 cs_rollback(hdev, cs);
1677 *cs_seq = ULLONG_MAX;
1678 /* The path below is both for good and erroneous exits */
1680 /* We finished with the CS in this function, so put the ref */
1682 free_cs_chunk_array:
1683 kfree(cs_chunk_array);
1688 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1691 struct hl_device *hdev = hpriv->hdev;
1692 struct hl_ctx *ctx = hpriv->ctx;
1693 bool need_soft_reset = false;
1694 int rc = 0, do_ctx_switch = 0;
1695 void __user *chunks;
1696 u32 num_chunks, tmp;
1700 if (hdev->supports_ctx_switch)
1701 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1703 if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1704 mutex_lock(&hpriv->restore_phase_mutex);
1706 if (do_ctx_switch) {
1707 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1709 dev_err_ratelimited(hdev->dev,
1710 "Failed to switch to context %d, rejecting CS! %d\n",
				/*
				 * If we timed out, or if the device is not
				 * IDLE while we want to do context-switch
				 * (-EBUSY), we need to soft-reset because QMAN
				 * is probably stuck. However, we can't call
				 * reset here directly because of a deadlock,
				 * so we need to do it at the very end of this
				 * function.
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1722 need_soft_reset = true;
1723 mutex_unlock(&hpriv->restore_phase_mutex);
1728 hdev->asic_funcs->restore_phase_topology(hdev);
1730 chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1731 num_chunks = args->in.num_chunks_restore;
1735 "Need to run restore phase but restore CS is empty\n");
1738 rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1739 cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
1742 mutex_unlock(&hpriv->restore_phase_mutex);
1746 "Failed to submit restore CS for context %d (%d)\n",
1751 /* Need to wait for restore completion before execution phase */
1753 enum hl_cs_wait_status status;
1755 ret = _hl_cs_wait_ioctl(hdev, ctx,
1756 jiffies_to_usecs(hdev->timeout_jiffies),
1757 *cs_seq, &status, NULL);
1760 "Restore CS for context %d failed to complete %d\n",
1767 if (hdev->supports_ctx_switch)
1768 ctx->thread_ctx_switch_wait_token = 1;
1770 } else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
1771 rc = hl_poll_timeout_memory(hdev,
1772 &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1773 100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1775 if (rc == -ETIMEDOUT) {
1777 "context switch phase timeout (%d)\n", tmp);
1783 if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1784 hl_device_reset(hdev, 0);
/*
 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
 * if the SOB value reaches the max value, move to the other SOB reserved
 * for the same stream.
 *
 * @hdev: pointer to device structure
 * @q_idx: stream queue index
 * @hw_sob: the H/W SOB used in this signal CS.
 * @count: signals count
 * @encaps_sig: tells whether it's reservation for encaps signals or not.
 *
 * Note that this function must be called while hw_queues_lock is taken.
 */
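/* Illustration: if next_sob_val + count would reach HL_MAX_SOB_VAL, the
 * handler switches the stream to its other reserved SOB (two SOBs are in use
 * per stream, see the HL_RSVD_SOBS usage below) and restarts counting on it;
 * otherwise next_sob_val is simply advanced by 'count'.
 */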
1801 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1802 struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1805 struct hl_sync_stream_properties *prop;
1806 struct hl_hw_sob *sob = *hw_sob, *other_sob;
1807 u8 other_sob_offset;
1809 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
	/* check for wraparound */
	if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
		/*
		 * Decrement as we reached the max value.
		 * The release function won't be called here as we've
		 * just incremented the refcount right before calling this
		 * function.
		 */
1821 hw_sob_put_err(sob);
		/*
		 * check the other sob value; if it is still in use then fail,
		 * otherwise make the switch.
		 */
1827 other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1828 other_sob = &prop->hw_sob[other_sob_offset];
1830 if (kref_read(&other_sob->kref) != 1) {
1831 dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
		/*
		 * next_sob_val always points to the next available signal
		 * in the sob, so in encaps signals it will be the next one
		 * after reserving the required amount.
		 */
		if (encaps_sig)
			prop->next_sob_val = count + 1;
		else
			prop->next_sob_val = count;
1846 /* only two SOBs are currently in use */
1847 prop->curr_sob_offset = other_sob_offset;
1848 *hw_sob = other_sob;
1851 * check if other_sob needs reset, then do it before using it
1852 * for the reservation or the next signal cs.
1853 * we do it here, and for both encaps and regular signal cs
1854 * cases in order to avoid possible races of two kref_put
1855 * of the sob which can occur at the same time if we move the
1856 * sob reset(kref_put) to cs_do_release function.
1857 * in addition, if we have combination of cs signal and
1858 * encaps, and at the point we need to reset the sob there was
1859 * no more reservations and only signal cs keep coming,
1860 * in such case we need signal_cs to put the refcount and
1863 if (other_sob->need_reset)
1864 hw_sob_put(other_sob);
1867 /* set reset indication for the sob */
1868 sob->need_reset = true;
1869 hw_sob_get(other_sob);
1872 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1873 prop->curr_sob_offset, q_idx);
1875 prop->next_sob_val += count;
1881 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1882 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1883 bool encaps_signals)
1885 u64 *signal_seq_arr = NULL;
1886 u32 size_to_copy, signal_seq_arr_len;
1889 if (encaps_signals) {
1890 *signal_seq = chunk->encaps_signal_seq;
1894 signal_seq_arr_len = chunk->num_signal_seq_arr;
1896 /* currently only one signal seq is supported */
1897 if (signal_seq_arr_len != 1) {
1898 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1899 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1901 "Wait for signal CS supports only one signal CS seq\n");
1905 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1906 sizeof(*signal_seq_arr),
1908 if (!signal_seq_arr)
1909 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1910 sizeof(*signal_seq_arr),
1912 if (!signal_seq_arr) {
1913 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1914 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1918 size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1919 if (copy_from_user(signal_seq_arr,
1920 u64_to_user_ptr(chunk->signal_seq_arr),
1922 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1923 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1925 "Failed to copy signal seq array from user\n");
1930 /* currently it is guaranteed to have only one signal seq */
1931 *signal_seq = signal_seq_arr[0];
1934 kfree(signal_seq_arr);
1939 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1940 struct hl_ctx *ctx, struct hl_cs *cs,
1941 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1943 struct hl_cs_counters_atomic *cntr;
1944 struct hl_cs_job *job;
1948 cntr = &hdev->aggregated_cs_counters;
1950 job = hl_cs_allocate_job(hdev, q_type, true);
1952 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1953 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1954 dev_err(hdev->dev, "Failed to allocate a new job\n");
1958 if (cs->type == CS_TYPE_WAIT)
1959 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1961 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1963 cb = hl_cb_kernel_create(hdev, cb_size, q_type == QUEUE_TYPE_HW);
1965 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1966 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1974 atomic_inc(&job->user_cb->cs_cnt);
1975 job->user_cb_size = cb_size;
1976 job->hw_queue_id = q_idx;
1978 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1979 && cs->encaps_signals)
1980 job->encaps_sig_wait_offset = encaps_signal_offset;
	/* No need for parsing, user CB is the patched CB.
	 * We call hl_cb_destroy() for two reasons - we don't need the CB in
	 * the CB idr anymore and to decrement its refcount as it was
	 * incremented inside hl_cb_kernel_create().
	 */
1987 job->patched_cb = job->user_cb;
1988 job->job_cb_size = job->user_cb_size;
1989 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
1991 /* increment refcount as for external queues we get completion */
1994 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1997 list_add_tail(&job->cs_node, &cs->job_list);
1999 hl_debugfs_add_job(hdev, job);
2004 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
2005 u32 q_idx, u32 count,
2006 u32 *handle_id, u32 *sob_addr,
2009 struct hw_queue_properties *hw_queue_prop;
2010 struct hl_sync_stream_properties *prop;
2011 struct hl_device *hdev = hpriv->hdev;
2012 struct hl_cs_encaps_sig_handle *handle;
2013 struct hl_encaps_signals_mgr *mgr;
2014 struct hl_hw_sob *hw_sob;
2018 if (count >= HL_MAX_SOB_VAL) {
2019 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
2025 if (q_idx >= hdev->asic_prop.max_queues) {
2026 dev_err(hdev->dev, "Queue index %d is invalid\n",
2032 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2034 if (!hw_queue_prop->supports_sync_stream) {
2036 "Queue index %d does not support sync stream operations\n",
2042 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2044 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
2050 handle->count = count;
2052 hl_ctx_get(hpriv->ctx);
2053 handle->ctx = hpriv->ctx;
2054 mgr = &hpriv->ctx->sig_mgr;
2056 spin_lock(&mgr->lock);
2057 hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
2058 spin_unlock(&mgr->lock);
2061 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
2066 handle->id = hdl_id;
2067 handle->q_idx = q_idx;
2068 handle->hdev = hdev;
2069 kref_init(&handle->refcount);
2071 hdev->asic_funcs->hw_queues_lock(hdev);
2073 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
	/*
	 * Increment the SOB value by count, per the user request,
	 * to reserve those signals.
	 * Check that the amount of signals to reserve does not exceed the max
	 * SOB value; if it does, switch SOBs.
	 */
2081 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
2084 dev_err(hdev->dev, "Failed to switch SOB\n");
2085 hdev->asic_funcs->hw_queues_unlock(hdev);
2089 /* set the hw_sob to the handle after calling the sob wraparound handler
2090 * since sob could have changed.
2092 handle->hw_sob = hw_sob;
	/* store the current sob value for unreserve validity check, and
	 * signal offset support
	 */
2097 handle->pre_sob_val = prop->next_sob_val - handle->count;
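	/* For example (illustration): if next_sob_val was 5 before reserving
	 * count = 3 signals, the wraparound handler advanced it to 8, so
	 * pre_sob_val is stored as 8 - 3 = 5, i.e. the value the SOB had when
	 * the reservation was made.
	 */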
2099 handle->cs_seq = ULLONG_MAX;
2101 *signals_count = prop->next_sob_val;
2102 hdev->asic_funcs->hw_queues_unlock(hdev);
2104 *sob_addr = handle->hw_sob->sob_addr;
2105 *handle_id = hdl_id;
2108 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
2109 hw_sob->sob_id, handle->hw_sob->sob_addr,
2110 prop->next_sob_val - 1, q_idx, hdl_id);
2114 spin_lock(&mgr->lock);
2115 idr_remove(&mgr->handles, hdl_id);
2116 spin_unlock(&mgr->lock);
2119 hl_ctx_put(handle->ctx);
2126 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
2128 struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
2129 struct hl_sync_stream_properties *prop;
2130 struct hl_device *hdev = hpriv->hdev;
2131 struct hl_encaps_signals_mgr *mgr;
2132 struct hl_hw_sob *hw_sob;
2133 u32 q_idx, sob_addr;
2136 mgr = &hpriv->ctx->sig_mgr;
2138 spin_lock(&mgr->lock);
2139 encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
2140 if (encaps_sig_hdl) {
2141 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
2142 handle_id, encaps_sig_hdl->hw_sob->sob_addr,
2143 encaps_sig_hdl->count);
2145 hdev->asic_funcs->hw_queues_lock(hdev);
2147 q_idx = encaps_sig_hdl->q_idx;
2148 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
2149 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
2150 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
		/* Check if sob_val got out of sync due to other
		 * signal submission requests which were handled
		 * between the reserve-unreserve calls, or an SOB switch
		 * upon reaching the SOB max value.
		 */
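		/* Example (illustration): if 3 signals were reserved when
		 * next_sob_val was 5 (so pre_sob_val = 5 and next_sob_val
		 * became 8), and another signal submission later bumped
		 * next_sob_val to 9, then pre_sob_val + count (8) !=
		 * next_sob_val (9) and the unreserve request is rejected.
		 */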
2157 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
2158 != prop->next_sob_val ||
2159 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
2160 dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
2161 encaps_sig_hdl->pre_sob_val,
2162 (prop->next_sob_val - encaps_sig_hdl->count));
2164 hdev->asic_funcs->hw_queues_unlock(hdev);
		/*
		 * Decrement the SOB value by count, per the user request,
		 * to unreserve those signals.
		 */
2173 prop->next_sob_val -= encaps_sig_hdl->count;
2175 hdev->asic_funcs->hw_queues_unlock(hdev);
2179 /* Release the id and free allocated memory of the handle */
2180 idr_remove(&mgr->handles, handle_id);
2182 /* unlock before calling ctx_put, where we might sleep */
2183 spin_unlock(&mgr->lock);
2184 hl_ctx_put(encaps_sig_hdl->ctx);
2185 kfree(encaps_sig_hdl);
2189 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
2193 spin_unlock(&mgr->lock);
2199 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
2200 void __user *chunks, u32 num_chunks,
2201 u64 *cs_seq, u32 flags, u32 timeout,
2202 u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
2204 struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
2205 bool handle_found = false, is_wait_cs = false,
2206 wait_cs_submitted = false,
2207 cs_encaps_signals = false;
2208 struct hl_cs_chunk *cs_chunk_array, *chunk;
2209 bool staged_cs_with_encaps_signals = false;
2210 struct hw_queue_properties *hw_queue_prop;
2211 struct hl_device *hdev = hpriv->hdev;
2212 struct hl_cs_compl *sig_waitcs_cmpl;
2213 u32 q_idx, collective_engine_id = 0;
2214 struct hl_cs_counters_atomic *cntr;
2215 struct hl_fence *sig_fence = NULL;
2216 struct hl_ctx *ctx = hpriv->ctx;
2217 enum hl_queue_type q_type;
2222 cntr = &hdev->aggregated_cs_counters;
2223 *cs_seq = ULLONG_MAX;
2225 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
2230 /* currently it is guaranteed to have only one chunk */
2231 chunk = &cs_chunk_array[0];
2233 if (chunk->queue_index >= hdev->asic_prop.max_queues) {
2234 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2235 atomic64_inc(&cntr->validation_drop_cnt);
2236 dev_err(hdev->dev, "Queue index %d is invalid\n",
2237 chunk->queue_index);
2239 goto free_cs_chunk_array;
2242 q_idx = chunk->queue_index;
2243 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
2244 q_type = hw_queue_prop->type;
2246 if (!hw_queue_prop->supports_sync_stream) {
2247 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2248 atomic64_inc(&cntr->validation_drop_cnt);
2250 "Queue index %d does not support sync stream operations\n",
2253 goto free_cs_chunk_array;
2256 if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
2257 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
2258 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2259 atomic64_inc(&cntr->validation_drop_cnt);
2261 "Queue index %d is invalid\n", q_idx);
2263 goto free_cs_chunk_array;
2266 if (!hdev->nic_ports_mask) {
2267 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2268 atomic64_inc(&cntr->validation_drop_cnt);
2270 "Collective operations not supported when NIC ports are disabled");
2272 goto free_cs_chunk_array;
2275 collective_engine_id = chunk->collective_engine_id;
2278 is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2279 cs_type == CS_TYPE_COLLECTIVE_WAIT);
2281 cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2284 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2285 ctx, cs_encaps_signals);
2287 goto free_cs_chunk_array;
2289 if (cs_encaps_signals) {
2290 /* check if the CS sequence has an encapsulated signals handle */
2296 spin_lock(&ctx->sig_mgr.lock);
2297 idp = &ctx->sig_mgr.handles;
2298 idr_for_each_entry(idp, encaps_sig_hdl, id) {
2299 if (encaps_sig_hdl->cs_seq == signal_seq) {
2300 /* get refcount to protect removing this handle from idr,
2301 * needed when multiple wait cs are used with offset
2302 * to wait on reserved encaps signals.
2303 * Since kref_put of this handle is executed outside the
2304 * current lock, it is possible that the handle refcount
2305 * is 0 but it has not yet been removed from the list. In this
2306 * case, the handle must be considered invalid.
2308 if (kref_get_unless_zero(&encaps_sig_hdl->refcount))
2309 handle_found = true;
2313 spin_unlock(&ctx->sig_mgr.lock);
2315 if (!handle_found) {
2316 /* treat as signal CS already finished */
2317 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2320 goto free_cs_chunk_array;
2323 /* validate also the signal offset value */
2324 if (chunk->encaps_signal_offset >
2325 encaps_sig_hdl->count) {
2326 dev_err(hdev->dev, "offset(%u) value exceeds max reserved signals count(%u)!\n",
2327 chunk->encaps_signal_offset,
2328 encaps_sig_hdl->count);
2330 goto free_cs_chunk_array;
2334 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2335 if (IS_ERR(sig_fence)) {
2336 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2337 atomic64_inc(&cntr->validation_drop_cnt);
2339 "Failed to get signal CS with seq 0x%llx\n",
2341 rc = PTR_ERR(sig_fence);
2342 goto free_cs_chunk_array;
2346 /* signal CS already finished */
2348 goto free_cs_chunk_array;
2352 container_of(sig_fence, struct hl_cs_compl, base_fence);
2354 staged_cs_with_encaps_signals = !!
2355 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2356 (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2358 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2359 !staged_cs_with_encaps_signals) {
2360 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2361 atomic64_inc(&cntr->validation_drop_cnt);
2363 "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2365 hl_fence_put(sig_fence);
2367 goto free_cs_chunk_array;
2370 if (completion_done(&sig_fence->completion)) {
2371 /* signal CS already finished */
2372 hl_fence_put(sig_fence);
2374 goto free_cs_chunk_array;
2378 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2381 hl_fence_put(sig_fence);
2383 goto free_cs_chunk_array;
2387 * Save the signal CS fence for later initialization right before
2388 * hanging the wait CS on the queue.
2389 * For the encaps signals case, we save the CS sequence and handle pointer
2390 * for later initialization.
2393 cs->signal_fence = sig_fence;
2394 /* Store the handle pointer so we don't have to look it up
2395 * again later in the flow, when we need to set the SOB info
2396 * in the hw_queue.
2398 if (cs->encaps_signals)
2399 cs->encaps_sig_hdl = encaps_sig_hdl;
2402 hl_debugfs_add_cs(cs);
2404 *cs_seq = cs->sequence;
2406 if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2407 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2408 q_idx, chunk->encaps_signal_offset);
2409 else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2410 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2411 cs, q_idx, collective_engine_id,
2412 chunk->encaps_signal_offset);
2414 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2415 atomic64_inc(&cntr->validation_drop_cnt);
2420 goto free_cs_object;
2422 if (q_type == QUEUE_TYPE_HW)
2423 INIT_WORK(&cs->finish_work, cs_completion);
2425 rc = hl_hw_queue_schedule_cs(cs);
2427 /* In case the wait CS failed here, it means the signal CS
2428 * already completed. We want to free all of its related objects,
2429 * but we don't want to fail the ioctl.
2433 else if (rc != -EAGAIN)
2435 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
2436 ctx->asid, cs->sequence, rc);
2437 goto free_cs_object;
2440 *signal_sob_addr_offset = cs->sob_addr_offset;
2441 *signal_initial_sob_count = cs->initial_sob_count;
2443 rc = HL_CS_STATUS_SUCCESS;
2445 wait_cs_submitted = true;
2449 cs_rollback(hdev, cs);
2450 *cs_seq = ULLONG_MAX;
2451 /* The path below is both for good and erroneous exits */
2453 /* We finished with the CS in this function, so put the ref */
2455 free_cs_chunk_array:
2456 if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs)
2457 kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
2458 kfree(cs_chunk_array);
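/*
 * cs_ioctl_engine_cores() validates the requested core command and the
 * number of cores, copies the core-id array from user space and hands it
 * to the ASIC-specific set_engine_cores() callback.
 */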
2463 static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
2464 u32 num_engine_cores, u32 core_command)
2466 struct hl_device *hdev = hpriv->hdev;
2467 void __user *engine_cores_arr;
2471 if (!hdev->asic_prop.supports_engine_modes)
2474 if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
2475 dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
2479 if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
2480 dev_err(hdev->dev, "Engine core command is invalid\n");
2484 engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
2485 cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
2489 if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
2490 dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
2495 rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
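/*
 * cs_ioctl_engines() is the generalized form of the above: it accepts any
 * hl_engine_command, bounds the array length by max_num_of_engines (or by
 * num_engine_cores for the RUN/HALT commands), copies the engine-id array
 * from user space and hands it to the ASIC-specific set_engines() callback.
 */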
2501 static int cs_ioctl_engines(struct hl_fpriv *hpriv, u64 engines_arr_user_addr,
2502 u32 num_engines, enum hl_engine_command command)
2504 struct hl_device *hdev = hpriv->hdev;
2505 u32 *engines, max_num_of_engines;
2506 void __user *engines_arr;
2509 if (!hdev->asic_prop.supports_engine_modes)
2512 if (command >= HL_ENGINE_COMMAND_MAX) {
2513 dev_err(hdev->dev, "Engine command is invalid\n");
2517 max_num_of_engines = hdev->asic_prop.max_num_of_engines;
2518 if (command == HL_ENGINE_CORE_RUN || command == HL_ENGINE_CORE_HALT)
2519 max_num_of_engines = hdev->asic_prop.num_engine_cores;
2521 if (!num_engines || num_engines > max_num_of_engines) {
2522 dev_err(hdev->dev, "Number of engines %d is invalid\n", num_engines);
2526 engines_arr = (void __user *) (uintptr_t) engines_arr_user_addr;
2527 engines = kmalloc_array(num_engines, sizeof(u32), GFP_KERNEL);
2531 if (copy_from_user(engines, engines_arr, num_engines * sizeof(u32))) {
2532 dev_err(hdev->dev, "Failed to copy engine-ids array from user\n");
2537 rc = hdev->asic_funcs->set_engines(hdev, engines, num_engines, command);
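/*
 * cs_ioctl_flush_pci_hbw_writes() flushes outstanding high-bandwidth PCI
 * writes by reading the designated flush register - relying on PCI ordering
 * rules, the read forces previously posted writes to complete. ASICs that
 * do not expose such a register do not support this flush.
 */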
2543 static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
2545 struct hl_device *hdev = hpriv->hdev;
2546 struct asic_fixed_properties *prop = &hdev->asic_prop;
2548 if (!prop->hbw_flush_reg) {
2549 dev_dbg(hdev->dev, "HBW flush is not supported\n");
2553 RREG32(prop->hbw_flush_reg);
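/*
 * hl_cs_ioctl() is the common entry point for all CS flavors: it runs the
 * sanity checks, performs an optional context switch, decodes the CS type
 * from the flags and dispatches to the matching handler above. On any
 * return value other than -EAGAIN it fills the output arguments per type.
 *
 * Illustrative userspace-side sketch of a default submission (field names
 * are taken from the in/out members handled below; the wrapper used to
 * issue the ioctl is hypothetical):
 *
 *	union hl_cs_args args = {0};
 *
 *	args.in.chunks_execute = (__u64) (uintptr_t) chunks_array;
 *	args.in.num_chunks_execute = num_chunks;
 *	args.in.cs_flags = 0;
 *	submit_cs_ioctl(fd, &args);	// hypothetical ioctl wrapper
 *	seq = args.out.seq;		// pass to the wait ioctl later
 */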
2558 int hl_cs_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
2560 struct hl_fpriv *hpriv = file_priv->driver_priv;
2561 union hl_cs_args *args = data;
2562 enum hl_cs_type cs_type = 0;
2563 u64 cs_seq = ULLONG_MAX;
2564 void __user *chunks;
2565 u32 num_chunks, flags, timeout,
2566 signals_count = 0, sob_addr = 0, handle_id = 0;
2567 u16 sob_initial_count = 0;
2570 rc = hl_cs_sanity_checks(hpriv, args);
2574 rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2578 cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2579 ~HL_CS_FLAGS_FORCE_RESTORE);
2580 chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2581 num_chunks = args->in.num_chunks_execute;
2582 flags = args->in.cs_flags;
2584 /* In case this is a staged CS, user should supply the CS sequence */
2585 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2586 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2587 cs_seq = args->in.seq;
2589 timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2590 ? msecs_to_jiffies(args->in.timeout * 1000)
2591 : hpriv->hdev->timeout_jiffies;
2594 case CS_TYPE_SIGNAL:
2596 case CS_TYPE_COLLECTIVE_WAIT:
2597 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2598 &cs_seq, args->in.cs_flags, timeout,
2599 &sob_addr, &sob_initial_count);
2601 case CS_RESERVE_SIGNALS:
2602 rc = cs_ioctl_reserve_signals(hpriv,
2603 args->in.encaps_signals_q_idx,
2604 args->in.encaps_signals_count,
2605 &handle_id, &sob_addr, &signals_count);
2607 case CS_UNRESERVE_SIGNALS:
2608 rc = cs_ioctl_unreserve_signals(hpriv,
2609 args->in.encaps_sig_handle_id);
2611 case CS_TYPE_ENGINE_CORE:
2612 rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
2613 args->in.num_engine_cores, args->in.core_command);
2615 case CS_TYPE_ENGINES:
2616 rc = cs_ioctl_engines(hpriv, args->in.engines,
2617 args->in.num_engines, args->in.engine_command);
2619 case CS_TYPE_FLUSH_PCI_HBW_WRITES:
2620 rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
2623 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2625 args->in.encaps_sig_handle_id,
2626 timeout, &sob_initial_count);
2630 if (rc != -EAGAIN) {
2631 memset(args, 0, sizeof(*args));
2634 case CS_RESERVE_SIGNALS:
2635 args->out.handle_id = handle_id;
2636 args->out.sob_base_addr_offset = sob_addr;
2637 args->out.count = signals_count;
2639 case CS_TYPE_SIGNAL:
2640 args->out.sob_base_addr_offset = sob_addr;
2641 args->out.sob_count_before_submission = sob_initial_count;
2642 args->out.seq = cs_seq;
2644 case CS_TYPE_DEFAULT:
2645 args->out.sob_count_before_submission = sob_initial_count;
2646 args->out.seq = cs_seq;
2649 args->out.seq = cs_seq;
2653 args->out.status = rc;
2659 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2660 enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
2662 struct hl_device *hdev = ctx->hdev;
2663 ktime_t timestamp_kt;
2667 if (IS_ERR(fence)) {
2668 rc = PTR_ERR(fence);
2670 dev_notice_ratelimited(hdev->dev,
2671 "Can't wait on CS %llu because current CS is at seq %llu\n",
2672 seq, ctx->cs_sequence);
2677 if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, &timestamp_kt, &error)) {
2679 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2680 seq, ctx->cs_sequence);
2681 *status = CS_WAIT_STATUS_GONE;
2686 goto report_results;
2690 completion_rc = completion_done(&fence->completion);
2692 unsigned long timeout;
2694 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2695 timeout_us : usecs_to_jiffies(timeout_us);
2697 wait_for_completion_interruptible_timeout(
2698 &fence->completion, timeout);
2701 error = fence->error;
2702 timestamp_kt = fence->timestamp;
2705 if (completion_rc > 0) {
2706 *status = CS_WAIT_STATUS_COMPLETED;
2708 *timestamp = ktime_to_ns(timestamp_kt);
2710 *status = CS_WAIT_STATUS_BUSY;
2713 if (completion_rc == -ERESTARTSYS)
2715 else if (error == -ETIMEDOUT || error == -EIO)
2722 * hl_cs_poll_fences - iterate CS fences to check for CS completion
2724 * @mcs_data: multi-CS internal data
2725 * @mcs_compl: multi-CS completion structure
2727 * @return 0 on success, otherwise non 0 error code
2729 * The function iterates over all CS sequences in the list and sets a bit in
2730 * completion_bitmap for each completed CS.
2731 * While iterating, the function adds the stream map of each fence in the fence
2732 * array to the completion QID stream map, to be used by CSs to signal
2733 * completion to the multi-CS context.
2734 * This function shall be called after taking the context ref
2736 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
2738 struct hl_fence **fence_ptr = mcs_data->fence_arr;
2739 struct hl_device *hdev = mcs_data->ctx->hdev;
2740 int i, rc, arr_len = mcs_data->arr_len;
2741 u64 *seq_arr = mcs_data->seq_arr;
2742 ktime_t max_ktime, first_cs_time;
2743 enum hl_cs_wait_status status;
2745 memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
2747 /* get all fences under the same lock */
2748 rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2753 * Re-initialize the completion here to handle 2 possible cases:
2754 * 1. A CS will complete the multi-CS prior to clearing the completion, in which
2755 * case the fence iteration is guaranteed to catch the CS completion.
2756 * 2. The completion will occur after re-init of the completion,
2757 * in which case we will wake up immediately in wait_for_completion.
2759 reinit_completion(&mcs_compl->completion);
2762 * Set to maximum time to verify the timestamp is valid: if this value
2763 * is unchanged at the end, no timestamp was updated.
2765 max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2766 first_cs_time = max_ktime;
2768 for (i = 0; i < arr_len; i++, fence_ptr++) {
2769 struct hl_fence *fence = *fence_ptr;
2772 * In order to prevent a case where we wait until timeout even though a CS associated
2773 * with the multi-CS actually completed, we do things in the below order:
2774 * 1. For each fence, set its QID map in the multi-CS completion QID map. This way
2775 * any CS can, potentially, complete the multi-CS for the specific QID (note
2776 * that once the completion is initialized, calling complete* and then waiting on the
2777 * completion will cause it to return at once)
2778 * 2. Only after allowing multi-CS completion for the specific QID do we check whether
2779 * the specific CS already completed (and thus the wait-for-completion part will
2780 * be skipped). If the CS has not completed, it is guaranteed that the completing CS
2781 * will wake up the completion.
2784 mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
2787 * function won't sleep as it is called with timeout 0 (i.e. it just polls the fence)
2790 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
2793 "wait_for_fence error :%d for CS seq %llu\n",
2799 case CS_WAIT_STATUS_BUSY:
2800 /* CS did not finish; QID to wait on is already stored */
2802 case CS_WAIT_STATUS_COMPLETED:
2804 * Use mcs_handling_done to avoid the possibility of mcs_data
2805 * returning to the user, indicating the CS completed, before it
2806 * finished all of its mcs handling. This avoids a race the next
2807 * time the user waits for mcs.
2808 * Note: when reaching this case the fence is definitely not NULL,
2809 * but the NULL check was added to satisfy static analysis
2811 if (fence && !fence->mcs_handling_done) {
2813 * In case the multi-CS is completed but the MCS handling is not
2814 * done, we "complete" the multi-CS to prevent it from waiting
2815 * until timeout, and the "multi-CS handling done" check will have
2816 * another chance at the next iteration
2818 complete_all(&mcs_compl->completion);
2822 mcs_data->completion_bitmap |= BIT(i);
2824 * For all completed CSs we take the earliest timestamp.
2825 * For this we have to validate that the timestamp is the
2826 * earliest of all timestamps so far.
2828 if (fence && mcs_data->update_ts &&
2829 (ktime_compare(fence->timestamp, first_cs_time) < 0))
2830 first_cs_time = fence->timestamp;
2832 case CS_WAIT_STATUS_GONE:
2833 mcs_data->update_ts = false;
2834 mcs_data->gone_cs = true;
2836 * It is possible to get old sequence numbers from the user,
2837 * which relate to already-completed CSs whose fences are
2838 * already gone. In this case, the CS is set as completed, but
2839 * there is no need to consider its QID for mcs completion.
2841 mcs_data->completion_bitmap |= BIT(i);
2844 dev_err(hdev->dev, "Invalid fence status\n");
2851 hl_fences_put(mcs_data->fence_arr, arr_len);
2853 if (mcs_data->update_ts &&
2854 (ktime_compare(first_cs_time, max_ktime) != 0))
2855 mcs_data->timestamp = ktime_to_ns(first_cs_time);
2860 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
2861 enum hl_cs_wait_status *status, s64 *timestamp)
2863 struct hl_fence *fence;
2871 fence = hl_ctx_get_fence(ctx, seq);
2873 rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2874 hl_fence_put(fence);
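/*
 * hl_usecs64_to_jiffies() converts a 64-bit microseconds value to jiffies.
 * Values that fit in 32 bits go through usecs_to_jiffies() directly; larger
 * values are converted via nanoseconds, clamped so that the intermediate
 * multiplication by NSEC_PER_USEC cannot overflow 64 bits.
 */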
2880 static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
2882 if (usecs <= U32_MAX)
2883 return usecs_to_jiffies(usecs);
2886 * If the value in nanoseconds is larger than 64 bits, use the largest 64-bit value.
2889 if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
2890 return nsecs_to_jiffies(U64_MAX);
2892 return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
2896 * hl_wait_multi_cs_completion_init - init completion structure
2898 * @hdev: pointer to habanalabs device structure
2899 * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
2900 * master QID to wait on
2902 * @return valid completion struct pointer on success, otherwise error pointer
2904 * Up to MULTI_CS_MAX_USER_CTX calls can be made to the driver concurrently.
2905 * The function gets the first available completion (by marking it "used")
2906 * and initializes its values.
2908 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
2910 struct multi_cs_completion *mcs_compl;
2913 /* find free multi_cs completion structure */
2914 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2915 mcs_compl = &hdev->multi_cs_completion[i];
2916 spin_lock(&mcs_compl->lock);
2917 if (!mcs_compl->used) {
2918 mcs_compl->used = 1;
2919 mcs_compl->timestamp = 0;
2921 * Init the QID map to 0 to avoid completion by CSs. The actual QID map
2922 * of the multi-CS CSs will be set incrementally at a later stage
2924 mcs_compl->stream_master_qid_map = 0;
2925 spin_unlock(&mcs_compl->lock);
2928 spin_unlock(&mcs_compl->lock);
2931 if (i == MULTI_CS_MAX_USER_CTX) {
2932 dev_err(hdev->dev, "no available multi-CS completion structure\n");
2933 return ERR_PTR(-ENOMEM);
2939 * hl_wait_multi_cs_completion_fini - return completion structure and mark it as unused
2942 * @mcs_compl: pointer to the completion structure
2944 static void hl_wait_multi_cs_completion_fini(
2945 struct multi_cs_completion *mcs_compl)
2948 * free completion structure, do it under lock to be in-sync with the
2949 * thread that signals completion
2951 spin_lock(&mcs_compl->lock);
2952 mcs_compl->used = 0;
2953 spin_unlock(&mcs_compl->lock);
2957 * hl_wait_multi_cs_completion - wait for first CS to complete
2959 * @mcs_data: multi-CS internal data
2961 * @return 0 on success, otherwise non 0 error code
2963 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
2964 struct multi_cs_completion *mcs_compl)
2968 completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
2969 mcs_data->timeout_jiffies);
2971 /* update timestamp */
2972 if (completion_rc > 0)
2973 mcs_data->timestamp = mcs_compl->timestamp;
2975 if (completion_rc == -ERESTARTSYS)
2976 return completion_rc;
2978 mcs_data->wait_status = completion_rc;
2984 * hl_multi_cs_completion_init - init array of multi-CS completion structures
2986 * @hdev: pointer to habanalabs device structure
2988 void hl_multi_cs_completion_init(struct hl_device *hdev)
2990 struct multi_cs_completion *mcs_cmpl;
2993 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2994 mcs_cmpl = &hdev->multi_cs_completion[i];
2996 spin_lock_init(&mcs_cmpl->lock);
2997 init_completion(&mcs_cmpl->completion);
3002 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
3004 * @hpriv: pointer to the private data of the fd
3005 * @data: pointer to multi-CS wait ioctl in/out args
3008 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3010 struct multi_cs_completion *mcs_compl;
3011 struct hl_device *hdev = hpriv->hdev;
3012 struct multi_cs_data mcs_data = {};
3013 union hl_wait_cs_args *args = data;
3014 struct hl_ctx *ctx = hpriv->ctx;
3015 struct hl_fence **fence_arr;
3016 void __user *seq_arr;
3022 for (i = 0 ; i < sizeof(args->in.pad) ; i++)
3023 if (args->in.pad[i]) {
3024 dev_dbg(hdev->dev, "Padding bytes must be 0\n");
3028 if (!hdev->supports_wait_for_multi_cs) {
3029 dev_err(hdev->dev, "Wait for multi CS is not supported\n");
3033 seq_arr_len = args->in.seq_arr_len;
3035 if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
3036 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
3037 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
3041 /* allocate memory for sequence array */
3043 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
3047 /* copy CS sequence array from user */
3048 seq_arr = (void __user *) (uintptr_t) args->in.seq;
3049 size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
3050 if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
3051 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
3056 /* allocate array for the fences */
3057 fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
3063 /* initialize the multi-CS internal data */
3065 mcs_data.seq_arr = cs_seq_arr;
3066 mcs_data.fence_arr = fence_arr;
3067 mcs_data.arr_len = seq_arr_len;
3071 /* wait (with timeout) for the first CS to be completed */
3072 mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
3073 mcs_compl = hl_wait_multi_cs_completion_init(hdev);
3074 if (IS_ERR(mcs_compl)) {
3075 rc = PTR_ERR(mcs_compl);
3079 /* poll all CS fences, extract timestamp */
3080 mcs_data.update_ts = true;
3081 rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3083 * skip wait for CS completion when one of the below is true:
3084 * - an error on the poll function
3085 * - one or more CS in the list completed
3086 * - the user called ioctl with timeout 0
3088 if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
3089 goto completion_fini;
3092 rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
3093 if (rc || (mcs_data.wait_status == 0))
3097 * poll fences once again to update the CS map.
3098 * no timestamp should be updated this time.
3100 mcs_data.update_ts = false;
3101 rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
3103 if (rc || mcs_data.completion_bitmap)
3107 * If hl_wait_multi_cs_completion returned before the timeout (i.e.
3108 * it got a completion), it was either completed by a CS in the multi-CS list
3109 * (in which case the indication will be a non-empty completion_bitmap) or it
3110 * was completed by a CS submitted to one of the shared stream masters but
3111 * not in the multi-CS list (in which case we should wait again, but modify
3112 * the timeout and set the timestamp as zero to let a CS related to the current
3113 * multi-CS set a new, relevant, timestamp)
3115 mcs_data.timeout_jiffies = mcs_data.wait_status;
3116 mcs_compl->timestamp = 0;
3120 hl_wait_multi_cs_completion_fini(mcs_compl);
3129 if (rc == -ERESTARTSYS) {
3130 dev_err_ratelimited(hdev->dev,
3131 "user process got signal while waiting for Multi-CS\n");
3138 /* update output args */
3139 memset(args, 0, sizeof(*args));
3141 if (mcs_data.completion_bitmap) {
3142 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3143 args->out.cs_completion_map = mcs_data.completion_bitmap;
3145 /* if the timestamp is not 0, it's valid */
3146 if (mcs_data.timestamp) {
3147 args->out.timestamp_nsec = mcs_data.timestamp;
3148 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3151 /* update if some CS was gone */
3152 if (!mcs_data.timestamp)
3153 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3155 args->out.status = HL_WAIT_CS_STATUS_BUSY;
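/*
 * hl_cs_wait_ioctl() waits for a single CS, identified by its sequence
 * number, and translates the wait result (completed, busy, gone, timed out
 * or aborted) into the output status and flags, including the completion
 * timestamp when one is available.
 */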
3161 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3163 struct hl_device *hdev = hpriv->hdev;
3164 union hl_wait_cs_args *args = data;
3165 enum hl_cs_wait_status status;
3166 u64 seq = args->in.seq;
3170 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
3172 if (rc == -ERESTARTSYS) {
3173 dev_err_ratelimited(hdev->dev,
3174 "user process got signal while waiting for CS handle %llu\n",
3179 memset(args, 0, sizeof(*args));
3182 if (rc == -ETIMEDOUT) {
3183 dev_err_ratelimited(hdev->dev,
3184 "CS %llu has timed-out while user process is waiting for it\n",
3186 args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
3187 } else if (rc == -EIO) {
3188 dev_err_ratelimited(hdev->dev,
3189 "CS %llu has been aborted while user process is waiting for it\n",
3191 args->out.status = HL_WAIT_CS_STATUS_ABORTED;
3197 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
3198 args->out.timestamp_nsec = timestamp;
3202 case CS_WAIT_STATUS_GONE:
3203 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
3205 case CS_WAIT_STATUS_COMPLETED:
3206 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
3208 case CS_WAIT_STATUS_BUSY:
3210 args->out.status = HL_WAIT_CS_STATUS_BUSY;
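/*
 * The helpers below implement the timestamp-registration flow: a user
 * supplies a CQ counter buffer and a timestamp buffer, and each
 * hl_user_pending_interrupt record binds one CQ counter slot (address and
 * target value) to one timestamp slot that is filled once the target value
 * is reached.
 */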
3217 static inline void set_record_cq_info(struct hl_user_pending_interrupt *record,
3218 struct hl_cb *cq_cb, u32 cq_offset, u32 target_value)
3220 record->ts_reg_info.cq_cb = cq_cb;
3221 record->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_offset;
3222 record->cq_target_value = target_value;
3225 static int validate_and_get_ts_record(struct device *dev,
3226 struct hl_ts_buff *ts_buff, u64 ts_offset,
3227 struct hl_user_pending_interrupt **req_event_record)
3229 struct hl_user_pending_interrupt *ts_cb_last;
3231 *req_event_record = (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3233 ts_cb_last = (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
3234 (ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
3236 /* Validate that ts_offset does not exceed the last record in the buffer */
3237 if (*req_event_record >= ts_cb_last) {
3238 dev_err(dev, "Ts offset(%llu) exceeds max CB offset(0x%llx)\n",
3239 ts_offset, (u64)(uintptr_t)ts_cb_last);
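/*
 * unregister_timestamp_node() takes the record off its interrupt's ts list
 * (taking the list lock itself only when need_lock is set) and drops the
 * buffer and CQ CB references that were taken when the event was
 * registered.
 */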
3246 static void unregister_timestamp_node(struct hl_device *hdev,
3247 struct hl_user_pending_interrupt *record, bool need_lock)
3249 struct hl_user_interrupt *interrupt = record->ts_reg_info.interrupt;
3250 bool ts_rec_found = false;
3251 unsigned long flags;
3254 spin_lock_irqsave(&interrupt->ts_list_lock, flags);
3256 if (record->ts_reg_info.in_use) {
3257 record->ts_reg_info.in_use = false;
3258 list_del(&record->list_node);
3259 ts_rec_found = true;
3263 spin_unlock_irqrestore(&interrupt->ts_list_lock, flags);
3265 /* Put refcounts that were taken when we registered the event */
3267 hl_mmap_mem_buf_put(record->ts_reg_info.buf);
3268 hl_cb_put(record->ts_reg_info.cq_cb);
3272 static int ts_get_and_handle_kernel_record(struct hl_device *hdev, struct hl_ctx *ctx,
3273 struct wait_interrupt_data *data, unsigned long *flags,
3274 struct hl_user_pending_interrupt **pend)
3276 struct hl_user_pending_interrupt *req_offset_record;
3277 struct hl_ts_buff *ts_buff = data->buf->private;
3278 bool need_lock = false;
3281 rc = validate_and_get_ts_record(data->buf->mmg->dev, ts_buff, data->ts_offset,
3282 &req_offset_record);
3286 /* In case the node is already registered, unregister it first and then re-use it */
3287 if (req_offset_record->ts_reg_info.in_use) {
3288 dev_dbg(data->buf->mmg->dev,
3289 "Requested record %p is in use on irq: %u ts addr: %p, unregister first then put on irq: %u\n",
3291 req_offset_record->ts_reg_info.interrupt->interrupt_id,
3292 req_offset_record->ts_reg_info.timestamp_kernel_addr,
3293 data->interrupt->interrupt_id);
3295 * Since the interrupt here can be different from the one the node is currently
3296 * registered on, and we don't want to lock two lists while doing the unregister,
3297 * unlock the new interrupt's ts list here and acquire the lock again once done.
3299 if (data->interrupt->interrupt_id !=
3300 req_offset_record->ts_reg_info.interrupt->interrupt_id) {
3303 spin_unlock_irqrestore(&data->interrupt->ts_list_lock, *flags);
3306 unregister_timestamp_node(hdev, req_offset_record, need_lock);
3309 spin_lock_irqsave(&data->interrupt->ts_list_lock, *flags);
3312 /* Fill up the new registration node info and add it to the list */
3313 req_offset_record->ts_reg_info.in_use = true;
3314 req_offset_record->ts_reg_info.buf = data->buf;
3315 req_offset_record->ts_reg_info.timestamp_kernel_addr =
3316 (u64 *) ts_buff->user_buff_address + data->ts_offset;
3317 req_offset_record->ts_reg_info.interrupt = data->interrupt;
3318 set_record_cq_info(req_offset_record, data->cq_cb, data->cq_offset,
3319 data->target_value);
3321 *pend = req_offset_record;
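/*
 * _hl_interrupt_ts_reg_ioctl() implements the register-for-timestamp flow:
 * it looks up the CQ counter buffer and the timestamp buffer, grabs (or
 * recycles) the requested record under the interrupt's ts list lock, and
 * either completes immediately - writing the current time to the user's
 * timestamp slot - if the CQ counter already reached the target value, or
 * queues the record on the interrupt's ts list.
 */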
3326 static int _hl_interrupt_ts_reg_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
3327 struct wait_interrupt_data *data,
3328 u32 *status, u64 *timestamp)
3330 struct hl_user_pending_interrupt *pend;
3331 unsigned long flags;
3336 data->cq_cb = hl_cb_get(data->mmg, data->cq_handle);
3342 /* Validate the cq offset */
3343 if (((u64 *) data->cq_cb->kernel_address + data->cq_offset) >=
3344 ((u64 *) data->cq_cb->kernel_address + (data->cq_cb->size / sizeof(u64)))) {
3349 dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, handle: 0x%llx, ts offset: %llu, cq_offset: %llu\n",
3350 data->interrupt->interrupt_id, data->ts_handle,
3351 data->ts_offset, data->cq_offset);
3353 data->buf = hl_mmap_mem_buf_get(data->mmg, data->ts_handle);
3359 spin_lock_irqsave(&data->interrupt->ts_list_lock, flags);
3361 /* get ts buffer record */
3362 rc = ts_get_and_handle_kernel_record(hdev, ctx, data, &flags, &pend);
3364 spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
3368 /* We check the completion value, as the interrupt could have been received
3369 * before we add the timestamp node to the ts list.
3371 if (*pend->cq_kernel_addr >= data->target_value) {
3372 spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
3374 dev_dbg(hdev->dev, "Target value already reached, releasing ts record: pend: %p, offset: %llu, interrupt: %u\n",
3375 pend, data->ts_offset, data->interrupt->interrupt_id);
3377 pend->ts_reg_info.in_use = 0;
3378 *status = HL_WAIT_CS_STATUS_COMPLETED;
3379 *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
3384 list_add_tail(&pend->list_node, &data->interrupt->ts_list_head);
3385 spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
3387 rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
3394 hl_mmap_mem_buf_put(data->buf);
3396 hl_cb_put(data->cq_cb);
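/*
 * _hl_interrupt_wait_ioctl() is the blocking variant for CQ-counter based
 * waits: it allocates a pending node with its own fence, checks the counter
 * once before queueing (to catch an interrupt that already fired), then
 * sleeps on the fence completion until the interrupt handler signals it,
 * the timeout expires or a signal is received.
 */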
3403 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
3404 struct wait_interrupt_data *data,
3405 u32 *status, u64 *timestamp)
3407 struct hl_user_pending_interrupt *pend;
3408 unsigned long timeout, flags;
3412 timeout = hl_usecs64_to_jiffies(data->intr_timeout_us);
3416 data->cq_cb = hl_cb_get(data->mmg, data->cq_handle);
3422 /* Validate the cq offset */
3423 if (((u64 *) data->cq_cb->kernel_address + data->cq_offset) >=
3424 ((u64 *) data->cq_cb->kernel_address + (data->cq_cb->size / sizeof(u64)))) {
3429 pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3435 hl_fence_init(&pend->fence, ULONG_MAX);
3436 pend->cq_kernel_addr = (u64 *) data->cq_cb->kernel_address + data->cq_offset;
3437 pend->cq_target_value = data->target_value;
3438 spin_lock_irqsave(&data->interrupt->wait_list_lock, flags);
3441 /* We check the completion value, as the interrupt could have been received
3442 * before we add the wait node to the wait list.
3444 if (*pend->cq_kernel_addr >= data->target_value || (!data->intr_timeout_us)) {
3445 spin_unlock_irqrestore(&data->interrupt->wait_list_lock, flags);
3447 if (*pend->cq_kernel_addr >= data->target_value)
3448 *status = HL_WAIT_CS_STATUS_COMPLETED;
3450 *status = HL_WAIT_CS_STATUS_BUSY;
3452 pend->fence.timestamp = ktime_get();
3456 /* Add the pending user interrupt to the relevant list for the interrupt
3457 * handler to monitor.
3458 * Note that we cannot keep the list sorted by target value
3459 * (in order to shorten the list traversal), since the
3460 * same list can hold nodes for different cq counter handles.
3462 list_add_tail(&pend->list_node, &data->interrupt->wait_list_head);
3463 spin_unlock_irqrestore(&data->interrupt->wait_list_lock, flags);
3465 /* Wait for interrupt handler to signal completion */
3466 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3468 if (completion_rc > 0) {
3469 if (pend->fence.error == -EIO) {
3470 dev_err_ratelimited(hdev->dev,
3471 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3474 *status = HL_WAIT_CS_STATUS_ABORTED;
3476 *status = HL_WAIT_CS_STATUS_COMPLETED;
3479 if (completion_rc == -ERESTARTSYS) {
3480 dev_err_ratelimited(hdev->dev,
3481 "user process got signal while waiting for interrupt ID %d\n",
3482 data->interrupt->interrupt_id);
3484 *status = HL_WAIT_CS_STATUS_ABORTED;
3486 /* The wait has timed out. We don't know anything beyond that
3487 * because the workload was not submitted through the driver.
3488 * Therefore, from the driver's perspective, the workload is still in progress.
3492 *status = HL_WAIT_CS_STATUS_BUSY;
3497 * We remove the node from the list here, rather than in the irq handler,
3498 * to cover the completion-timeout case. If it's a registration
3499 * for a ts record, the node will be deleted in the irq handler after
3500 * we reach the target value.
3502 spin_lock_irqsave(&data->interrupt->wait_list_lock, flags);
3503 list_del(&pend->list_node);
3504 spin_unlock_irqrestore(&data->interrupt->wait_list_lock, flags);
3507 *timestamp = ktime_to_ns(pend->fence.timestamp);
3509 hl_cb_put(data->cq_cb);
3515 hl_cb_put(data->cq_cb);
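/*
 * _hl_interrupt_wait_ioctl_user_addr() waits on a completion value that
 * lives in user memory: the value is read with copy_from_user() and
 * compared against the target, and after every interrupt-driven wakeup the
 * comparison is repeated (re-arming the completion first) until the target
 * is reached, the wait times out or an error/abort is detected.
 */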
3522 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
3523 u64 timeout_us, u64 user_address,
3524 u64 target_value, struct hl_user_interrupt *interrupt,
3528 struct hl_user_pending_interrupt *pend;
3529 unsigned long timeout, flags;
3530 u64 completion_value;
3534 timeout = hl_usecs64_to_jiffies(timeout_us);
3538 pend = kzalloc(sizeof(*pend), GFP_KERNEL);
3544 hl_fence_init(&pend->fence, ULONG_MAX);
3546 /* Add pending user interrupt to relevant list for the interrupt
3547 * handler to monitor
3549 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3550 list_add_tail(&pend->list_node, &interrupt->wait_list_head);
3551 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3553 /* We check the completion value, as the interrupt could have been received
3554 * before we added the node to the wait list
3556 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3557 dev_err(hdev->dev, "Failed to copy completion value from user\n");
3559 goto remove_pending_user_interrupt;
3562 if (completion_value >= target_value) {
3563 *status = HL_WAIT_CS_STATUS_COMPLETED;
3564 /* There was no interrupt; we assume the completion happened just now. */
3565 pend->fence.timestamp = ktime_get();
3567 *status = HL_WAIT_CS_STATUS_BUSY;
3570 if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
3571 goto remove_pending_user_interrupt;
3574 /* Wait for interrupt handler to signal completion */
3575 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
3578 /* If the timeout did not expire, we need to perform the comparison.
3579 * If the comparison fails, keep waiting until the timeout expires
3581 if (completion_rc > 0) {
3582 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3583 /* reinit_completion must be called before we check the user
3584 * completion value; otherwise, if an interrupt is received after
3585 * the comparison and before the next wait_for_completion,
3586 * we will reach the timeout and fail
3588 reinit_completion(&pend->fence.completion);
3589 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3591 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
3592 dev_err(hdev->dev, "Failed to copy completion value from user\n");
3595 goto remove_pending_user_interrupt;
3598 if (completion_value >= target_value) {
3599 *status = HL_WAIT_CS_STATUS_COMPLETED;
3600 } else if (pend->fence.error) {
3601 dev_err_ratelimited(hdev->dev,
3602 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
3604 /* set the command completion status as ABORTED */
3605 *status = HL_WAIT_CS_STATUS_ABORTED;
3607 timeout = completion_rc;
3610 } else if (completion_rc == -ERESTARTSYS) {
3611 dev_err_ratelimited(hdev->dev,
3612 "user process got signal while waiting for interrupt ID %d\n",
3613 interrupt->interrupt_id);
3616 /* The wait has timed out. We don't know anything beyond that
3617 * because the workload wasn't submitted through the driver.
3618 * Therefore, from the driver's perspective, the workload is still in progress.
3622 *status = HL_WAIT_CS_STATUS_BUSY;
3625 remove_pending_user_interrupt:
3626 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
3627 list_del(&pend->list_node);
3628 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
3630 *timestamp = ktime_to_ns(pend->fence.timestamp);
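/*
 * hl_interrupt_wait_ioctl() decodes the interrupt ID from the wait flags and
 * maps it to a decoder interrupt, a user interrupt or one of the common
 * interrupts, then dispatches to the CQ-counter wait (with or without
 * timestamp registration) or to the user-address wait, and finally fills the
 * output status and timestamp.
 */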
3638 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
3640 u16 interrupt_id, first_interrupt, last_interrupt;
3641 struct hl_device *hdev = hpriv->hdev;
3642 struct asic_fixed_properties *prop;
3643 struct hl_user_interrupt *interrupt;
3644 union hl_wait_cs_args *args = data;
3645 u32 status = HL_WAIT_CS_STATUS_BUSY;
3649 prop = &hdev->asic_prop;
3651 if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
3652 dev_err(hdev->dev, "no user interrupts allowed");
3656 interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
3658 first_interrupt = prop->first_available_user_interrupt;
3659 last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
3661 if (interrupt_id < prop->user_dec_intr_count) {
3663 /* Check if the requested core is enabled */
3664 if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
3665 dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed",
3670 interrupt = &hdev->user_interrupt[interrupt_id];
3672 } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
3674 int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
3675 interrupt = &hdev->user_interrupt[int_idx];
3677 } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
3678 interrupt = &hdev->common_user_cq_interrupt;
3679 } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
3680 interrupt = &hdev->common_decoder_interrupt;
3682 dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
3686 if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ) {
3687 struct wait_interrupt_data wait_intr_data = {0};
3689 wait_intr_data.interrupt = interrupt;
3690 wait_intr_data.mmg = &hpriv->mem_mgr;
3691 wait_intr_data.cq_handle = args->in.cq_counters_handle;
3692 wait_intr_data.cq_offset = args->in.cq_counters_offset;
3693 wait_intr_data.ts_handle = args->in.timestamp_handle;
3694 wait_intr_data.ts_offset = args->in.timestamp_offset;
3695 wait_intr_data.target_value = args->in.target;
3696 wait_intr_data.intr_timeout_us = args->in.interrupt_timeout_us;
3698 if (args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT) {
3700 * Allow only one registration at a time. This is needed in order to prevent
3701 * issues while handling the re-use flow of the same offset.
3702 * Since the registration flow is protected only by the interrupt lock,
3703 * the re-use flow might request to move a ts node to another interrupt list,
3704 * and in such a case we're not protected.
3706 mutex_lock(&hpriv->ctx->ts_reg_lock);
3708 rc = _hl_interrupt_ts_reg_ioctl(hdev, hpriv->ctx, &wait_intr_data,
3709 &status, &timestamp);
3711 mutex_unlock(&hpriv->ctx->ts_reg_lock);
3713 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &wait_intr_data,
3714 &status, &timestamp);
3716 rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
3717 args->in.interrupt_timeout_us, args->in.addr,
3718 args->in.target, interrupt, &status,
3725 memset(args, 0, sizeof(*args));
3726 args->out.status = status;
3729 args->out.timestamp_nsec = timestamp;
3730 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
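/*
 * hl_wait_ioctl() is the entry point for all wait flavors: plain CS wait,
 * multi-CS wait and user-interrupt wait, selected by the flags in the input
 * arguments. Illustrative userspace-side sketch of a single-CS wait (field
 * names are the in/out members handled in hl_cs_wait_ioctl() above; the
 * wrapper used to issue the ioctl is hypothetical):
 *
 *	union hl_wait_cs_args wait_args = {0};
 *
 *	wait_args.in.seq = seq;			// sequence returned by the CS ioctl
 *	wait_args.in.timeout_us = 1000000;	// e.g. one second
 *	wait_cs_ioctl(fd, &wait_args);		// hypothetical ioctl wrapper
 *	if (wait_args.out.status == HL_WAIT_CS_STATUS_COMPLETED)
 *		handle_completion();		// hypothetical user callback
 */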
3736 int hl_wait_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
3738 struct hl_fpriv *hpriv = file_priv->driver_priv;
3739 struct hl_device *hdev = hpriv->hdev;
3740 union hl_wait_cs_args *args = data;
3741 u32 flags = args->in.flags;
3744 /* If the device is not operational, or if an error has happened and the user should release
3745 * the device, there is no point in waiting for any command submission or user interrupt.
3747 if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
3750 if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
3751 rc = hl_interrupt_wait_ioctl(hpriv, data);
3752 else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
3753 rc = hl_multi_cs_wait_ioctl(hpriv, data);
3755 rc = hl_cs_wait_ioctl(hpriv, data);