// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

#define IPC_MAX_RX_MSG	128

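/*
 * Layout of a single TX slot: the generic IPC header immediately followed
 * by the JSM message it describes. Slots are carved out of the TX buffer
 * by a genalloc pool, so header and message share one allocation and the
 * header's data_addr can be derived with offsetof().
 */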
struct ivpu_ipc_tx_buf {
	struct ivpu_ipc_hdr ipc;
	struct vpu_jsm_msg jsm;
};

static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
			      struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
	ivpu_dbg(vdev, IPC,
		 "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)\n",
		 c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
		 ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
}

static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
			      struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
{
	u32 *payload = (u32 *)&jsm_msg->payload;

	ivpu_dbg(vdev, JSM,
		 "%s: vpu:0x%08x (type:%s, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
		 c, vpu_addr, ivpu_jsm_msg_type_to_str(jsm_msg->type),
		 jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
		 payload[0], payload[1], payload[2], payload[3], payload[4]);
}

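/*
 * Message statuses are the ownership handshake with the firmware: marking
 * both structures FREE hands the RX slot back. The rings live in
 * write-combined memory, hence the wmb() to flush the WC buffers before
 * the firmware can observe the update.
 */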
static void
ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		      struct vpu_jsm_msg *jsm_msg)
{
	ipc_hdr->status = IVPU_IPC_HDR_FREE;
	if (jsm_msg)
		jsm_msg->status = VPU_JSM_MSG_FREE;
	wmb(); /* Flush WC buffers for message statuses */
}

static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	ivpu_bo_free_internal(ipc->mem_rx);
	ivpu_bo_free_internal(ipc->mem_tx);
}

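/*
 * Reserve a TX slot, populate the IPC header and JSM message and stamp a
 * fresh request_id. A stale non-FREE status only triggers a warning: the
 * pool is the sole owner of the slot, so it is reused regardless.
 */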
static int
ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_tx_buf *tx_buf;
	u32 tx_buf_vpu_addr;
	u32 jsm_vpu_addr;

	tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
	if (!tx_buf_vpu_addr) {
		ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %ld\n",
				     sizeof(*tx_buf));
		return -ENOMEM;
	}

	tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
	if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
		gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
		return -EIO;
	}

	jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);

	if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
		ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n",
				      tx_buf_vpu_addr);

	if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
		ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n",
				      jsm_vpu_addr);

	memset(tx_buf, 0, sizeof(*tx_buf));
	tx_buf->ipc.data_addr = jsm_vpu_addr;
	/* TODO: Set data_size to actual JSM message size, not union of all messages */
	tx_buf->ipc.data_size = sizeof(*req);
	tx_buf->ipc.channel = cons->channel;
	tx_buf->ipc.src_node = 0;
	tx_buf->ipc.dst_node = 1;
	tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
	tx_buf->jsm.type = req->type;
	tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
	tx_buf->jsm.payload = req->payload;

	req->request_id = atomic_inc_return(&ipc->request_id);
	tx_buf->jsm.request_id = req->request_id;
	cons->request_id = req->request_id;
	wmb(); /* Flush WC buffers for IPC, JSM msgs */

	cons->tx_vpu_addr = tx_buf_vpu_addr;

	ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
	ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);

	return 0;
}

static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	if (vpu_addr)
		gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
}

static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
	ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
}

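/*
 * Wrap a received message in an ivpu_ipc_rx_msg and queue it. This runs
 * from the IRQ handler with cons_lock held, hence GFP_ATOMIC; on
 * allocation failure the slot goes straight back to the firmware.
 * Messages for callback consumers go on the global cb_msg_list for the
 * IRQ thread; the rest wake the consumer's blocking receive.
 */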
static void
ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg;

	lockdep_assert_held(&ipc->cons_lock);
	lockdep_assert_irqs_disabled();

	rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
	if (!rx_msg) {
		ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		return;
	}

	atomic_inc(&ipc->rx_msg_count);

	rx_msg->ipc_hdr = ipc_hdr;
	rx_msg->jsm_msg = jsm_msg;
	rx_msg->callback = cons->rx_callback;

	if (rx_msg->callback) {
		list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
	} else {
		spin_lock(&cons->rx_lock);
		list_add_tail(&rx_msg->link, &cons->rx_msg_list);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
}

static void
ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
{
	list_del(&rx_msg->link);
	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
	atomic_dec(&vdev->ipc->rx_msg_count);
	kfree(rx_msg);
}

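/*
 * A consumer claims one IPC channel. With an rx_callback it is serviced
 * asynchronously from the IRQ thread; without one, responses are queued
 * until ivpu_ipc_receive() picks them up.
 */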
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			   u32 channel, ivpu_ipc_rx_callback_t rx_callback)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	INIT_LIST_HEAD(&cons->link);
	cons->channel = channel;
	cons->tx_vpu_addr = 0;
	cons->request_id = 0;
	cons->aborted = false;
	cons->rx_callback = rx_callback;
	spin_lock_init(&cons->rx_lock);
	INIT_LIST_HEAD(&cons->rx_msg_list);
	init_waitqueue_head(&cons->rx_msg_wq);

	spin_lock_irq(&ipc->cons_lock);
	list_add_tail(&cons->link, &ipc->cons_list);
	spin_unlock_irq(&ipc->cons_lock);
}

void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	spin_lock_irq(&ipc->cons_lock);
	list_del(&cons->link);
	spin_unlock_irq(&ipc->cons_lock);

	spin_lock_irq(&cons->rx_lock);
	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);

	ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}

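/*
 * Single send under ipc->lock: prepare the TX slot, then ring the TX
 * doorbell register with its VPU address. Fails with -EAGAIN while the
 * IPC is disabled.
 */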
static int
ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	mutex_lock(&ipc->lock);

	if (!ipc->on) {
		ret = -EAGAIN;
		goto unlock;
	}

	ret = ivpu_ipc_tx_prepare(vdev, cons, req);
	if (ret)
		goto unlock;

	ivpu_ipc_tx(vdev, cons->tx_vpu_addr);

unlock:
	mutex_unlock(&ipc->lock);
	return ret;
}

static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
{
	bool ret;

	spin_lock_irq(&cons->rx_lock);
	ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
	spin_unlock_irq(&cons->rx_lock);

	return ret;
}

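/*
 * Blocking receive for synchronous (callback-less) consumers. Waits for a
 * queued response or an abort, copies the header and JSM message out and
 * hands the RX slot back to the firmware before returning.
 */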
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		     struct ivpu_ipc_hdr *ipc_buf,
		     struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
{
	struct ivpu_ipc_rx_msg *rx_msg;
	int wait_ret, ret = 0;

	if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
		return -EINVAL;

	wait_ret = wait_event_timeout(cons->rx_msg_wq,
				      ivpu_ipc_rx_need_wakeup(cons),
				      msecs_to_jiffies(timeout_ms));
	if (wait_ret == 0)
		return -ETIMEDOUT;

	spin_lock_irq(&cons->rx_lock);
	if (cons->aborted) {
		spin_unlock_irq(&cons->rx_lock);
		return -ECANCELED;
	}
	rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
	if (!rx_msg) {
		spin_unlock_irq(&cons->rx_lock);
		return -EAGAIN;
	}

	if (ipc_buf)
		memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
	if (rx_msg->jsm_msg) {
		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));

		if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
			ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
			ret = -EBADMSG;
		}
		if (jsm_msg)
			memcpy(jsm_msg, rx_msg->jsm_msg, size);
	}

	ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);
	return ret;
}

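/*
 * Synchronous request/response on a throwaway consumer: register, send,
 * wait for the reply and verify its type. Used by the *_send_receive()
 * wrappers below.
 */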
static int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			       enum vpu_ipc_msg_type expected_resp_type,
			       struct vpu_jsm_msg *resp, u32 channel,
			       unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
				      ivpu_jsm_msg_type_to_str(req->type), ret);
		goto consumer_del;
	}

	if (resp->type != expected_resp_type) {
		ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
		ret = -EBADE;
	}

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	return ret;
}

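/*
 * Variant for use while the device is already powered up. On a timeout a
 * heartbeat query is issued to tell a busy firmware from a dead one; only
 * when the heartbeat also times out is recovery triggered. The caller
 * sees the original -ETIMEDOUT either way.
 */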
int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
				 u32 channel, unsigned long timeout_ms)
{
	struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg hb_resp;
	int ret, hb_ret;

	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));

	ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
	if (ret != -ETIMEDOUT)
		return ret;

	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
						vdev->timeout.jsm);
	if (hb_ret == -ETIMEDOUT)
		ivpu_pm_trigger_recovery(vdev, "IPC timeout");

	return ret;
}

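/*
 * Same as above, but takes a runtime PM reference for the duration of the
 * transaction, powering the device up if needed.
 */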
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms)
{
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);

	ivpu_rpm_put(vdev);
	return ret;
}

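/*
 * A message matches a consumer when the channels agree and, for JSM
 * messages, the request_id echoes the one stamped at TX time. Boot
 * messages carry no JSM payload and match on the channel alone.
 */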
static bool
ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	if (cons->channel != ipc_hdr->channel)
		return false;

	if (!jsm_msg || jsm_msg->request_id == cons->request_id)
		return true;

	return false;
}

void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons;
	struct ivpu_ipc_hdr *ipc_hdr;
	struct vpu_jsm_msg *jsm_msg;
	unsigned long flags;
	bool dispatched;
	u32 vpu_addr;

	/*
	 * The driver needs to purge all messages from the IPC FIFO to clear
	 * the IPC interrupt. Unless the FIFO is drained to zero, no further
	 * IPC interrupts will be generated.
	 */
	while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
		vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
		if (vpu_addr == REG_IO_ERROR) {
			ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
			return;
		}

		ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
		if (!ipc_hdr) {
			ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
			continue;
		}
		ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);

		jsm_msg = NULL;
		if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
			jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
			if (!jsm_msg) {
				ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n",
						      ipc_hdr->data_addr);
				ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
				continue;
			}
			ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
		}

		if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
			ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n",
					      IPC_MAX_RX_MSG);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
			continue;
		}

		dispatched = false;
		spin_lock_irqsave(&ipc->cons_lock, flags);
		list_for_each_entry(cons, &ipc->cons_list, link) {
			if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
				ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
				dispatched = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ipc->cons_lock, flags);

		if (!dispatched) {
			ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		}
	}

	if (wake_thread)
		*wake_thread = !list_empty(&ipc->cb_msg_list);
}

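/*
 * Threaded half of the IRQ: splice the callback list under cons_lock, then
 * run the callbacks and release the RX slots without any lock held.
 */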
irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;
	struct list_head cb_msg_list;

	INIT_LIST_HEAD(&cb_msg_list);

	spin_lock_irq(&ipc->cons_lock);
	list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
	spin_unlock_irq(&ipc->cons_lock);

	list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
		rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	}

	return IRQ_HANDLED;
}

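/*
 * Allocate the two 16K write-combined rings (TX and RX) and the genalloc
 * pool that hands out TX slots aligned to IVPU_IPC_ALIGNMENT.
 */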
int ivpu_ipc_init(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
	if (!ipc->mem_tx) {
		ivpu_err(vdev, "Failed to allocate mem_tx\n");
		return -ENOMEM;
	}

	ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
	if (!ipc->mem_rx) {
		ivpu_err(vdev, "Failed to allocate mem_rx\n");
		ret = -ENOMEM;
		goto err_free_tx;
	}

	ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
					  -1, "TX_IPC_JSM");
	if (IS_ERR(ipc->mm_tx)) {
		ret = PTR_ERR(ipc->mm_tx);
		ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
		goto err_free_rx;
	}

	ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
	if (ret) {
		ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
		goto err_free_rx;
	}

	spin_lock_init(&ipc->cons_lock);
	INIT_LIST_HEAD(&ipc->cons_list);
	INIT_LIST_HEAD(&ipc->cb_msg_list);
	ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
		goto err_free_rx;
	}
	ivpu_ipc_reset(vdev);
	return 0;

err_free_rx:
	ivpu_bo_free_internal(ipc->mem_rx);
err_free_tx:
	ivpu_bo_free_internal(ipc->mem_tx);
	return ret;
}

void ivpu_ipc_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	drm_WARN_ON(&vdev->drm, ipc->on);
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);

	ivpu_ipc_mem_fini(vdev);
}

void ivpu_ipc_enable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	ipc->on = true;
	mutex_unlock(&ipc->lock);
}

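/*
 * Stop accepting new sends, then abort every synchronous consumer so a
 * blocked ivpu_ipc_receive() returns -ECANCELED instead of timing out,
 * and drop any responses still queued.
 */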
void ivpu_ipc_disable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons, *c;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));

	mutex_lock(&ipc->lock);
	ipc->on = false;
	mutex_unlock(&ipc->lock);

	spin_lock_irq(&ipc->cons_lock);
	list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
		spin_lock(&cons->rx_lock);
		if (!cons->rx_callback)
			cons->aborted = true;
		list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
			ivpu_ipc_rx_msg_del(vdev, rx_msg);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
	spin_unlock_irq(&ipc->cons_lock);

	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
}

void ivpu_ipc_reset(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	drm_WARN_ON(&vdev->drm, ipc->on);

	memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
	memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
	wmb(); /* Flush WC buffers for TX and RX rings */

	mutex_unlock(&ipc->lock);
}