// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"
#include "registers.h"
#include "trace.h"

#define AVS_IPC_TIMEOUT_MS	300
#define AVS_D0IX_DELAY_MS	300
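
/*
 * D0ix is the DSP's shallow low-power state. The helpers below track the
 * current state in ipc->in_d0ix and use ipc->d0ix_disable_depth as a
 * refcount so that multiple users can veto power gating concurrently.
 */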
static int
avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	/* Is transition required? */
	if (ipc->in_d0ix == enable)
		return 0;

	ret = avs_dsp_op(adev, set_d0ix, enable);
	if (ret) {
		/* Prevent further d0ix attempts on conscious IPC failure. */
		if (ret == -AVS_EIPC)
			atomic_inc(&ipc->d0ix_disable_depth);

		ipc->in_d0ix = false;
		return ret;
	}

	ipc->in_d0ix = enable;
	return 0;
}
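
/*
 * Entry to D0ix is deferred; every new request re-arms the delayed work so
 * the transition only happens once IPC traffic has been quiet for
 * AVS_D0IX_DELAY_MS.
 */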
static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	if (atomic_read(&adev->ipc->d0ix_disable_depth))
		return;

	mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
			 msecs_to_jiffies(AVS_D0IX_DELAY_MS));
}

static void avs_dsp_d0ix_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

	avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
}

static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	struct avs_ipc *ipc = adev->ipc;

	if (!atomic_read(&ipc->d0ix_disable_depth)) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}
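
/*
 * avs_dsp_disable_d0ix() and avs_dsp_enable_d0ix() form a refcounted pair:
 * power gating is prevented on the first disable and the delayed D0ix entry
 * is rescheduled only once the last user re-enables it.
 */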
int avs_dsp_disable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Prevent PG only on the first disable. */
	if (atomic_add_return(1, &ipc->d0ix_disable_depth) == 1) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}

int avs_dsp_enable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
		queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
				   msecs_to_jiffies(AVS_D0IX_DELAY_MS));
	return 0;
}
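
/*
 * Full DSP recovery: disconnect every running stream, forcibly power off
 * all cores and reboot the firmware. Runs from the recovery work, never
 * from IRQ context, so taking sleeping locks here is fine.
 */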
static void avs_dsp_recovery(struct avs_dev *adev)
{
	struct avs_soc_component *acomp;
	unsigned int core_mask;
	int ret;

	mutex_lock(&adev->comp_list_mutex);
	/* disconnect all running streams */
	list_for_each_entry(acomp, &adev->comp_list, node) {
		struct snd_soc_pcm_runtime *rtd;
		struct snd_soc_card *card;

		card = acomp->base.card;
		if (!card)
			continue;

		for_each_card_rtds(card, rtd) {
			struct snd_pcm *pcm;
			int dir;

			pcm = rtd->pcm;
			if (!pcm || rtd->dai_link->no_pcm)
				continue;

			for_each_pcm_streams(dir) {
				struct snd_pcm_substream *substream;

				substream = pcm->streams[dir].substream;
				if (!substream || !substream->runtime)
					continue;

				/* snd_pcm_stop() requires the stream lock to be held. */
				snd_pcm_stream_lock_irq(substream);
				snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
				snd_pcm_stream_unlock_irq(substream);
			}
		}
	}
	mutex_unlock(&adev->comp_list_mutex);

	/* forcibly shutdown all cores */
	core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
	avs_dsp_core_disable(adev, core_mask);

	/* attempt dsp reboot */
	ret = avs_dsp_boot_firmware(adev, true);
	if (ret < 0)
		dev_err(adev->dev, "dsp reboot failed: %d\n", ret);

	pm_runtime_mark_last_busy(adev->dev);
	pm_runtime_enable(adev->dev);
	pm_request_autosuspend(adev->dev);

	atomic_set(&adev->ipc->recovering, 0);
}

static void avs_dsp_recovery_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);

	avs_dsp_recovery(to_avs_dev(ipc->dev));
}
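
/*
 * Invoked both for genuine EXCEPTION_CAUGHT notifications and for IPC
 * timeouts, which receive the same treatment. Only schedules the recovery
 * work; the heavy lifting happens in avs_dsp_recovery().
 */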
static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Account for the double-exception case. */
	ipc->ready = false;

	if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
		dev_err(adev->dev, "dsp recovery is already in progress\n");
		return;
	}

	dev_crit(adev->dev, "communication severed, rebooting dsp..\n");

	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
	/* Re-enabled on recovery completion. */
	pm_runtime_disable(adev->dev);

	/* Process received notification. */
	avs_dsp_op(adev, coredump, msg);

	schedule_work(&ipc->recovery_work);
}
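
/* Copy a solicited reply from the uplink (FW -> host) mailbox window. */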
static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;
	union avs_reply_msg msg = AVS_MSG(header);
	u64 reg;

	reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
	trace_avs_ipc_reply_msg(header, reg);

	ipc->rx.header = header;
	/* Abort copying payload if request processing was unsuccessful. */
	if (!msg.status) {
		/* update size in case of LARGE_CONFIG_GET */
		if (msg.msg_target == AVS_MOD_MSG &&
		    msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
			/* Clamp to the mailbox size to avoid overflowing rx.data. */
			ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
					     msg.ext.large_config.data_off_size);

		memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
		trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
	}
}
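
/*
 * Unsolicited messages. Payload size depends on the notification type;
 * for MODULE_EVENT it is only known after reading the event header itself.
 */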
static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
{
	struct avs_notify_mod_data mod_data;
	union avs_notify_msg msg = AVS_MSG(header);
	size_t data_size = 0;
	void *data = NULL;
	u64 reg;

	reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
	trace_avs_ipc_notify_msg(header, reg);

	/* Ignore spurious notifications until handshake is established. */
	if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
		dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
		return;
	}

	/* Calculate notification payload size. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		break;

	case AVS_NOTIFY_PHRASE_DETECTED:
		data_size = sizeof(struct avs_notify_voice_data);
		break;

	case AVS_NOTIFY_RESOURCE_EVENT:
		data_size = sizeof(struct avs_notify_res_data);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		break;

	case AVS_NOTIFY_MODULE_EVENT:
		/* To know the total payload size, header needs to be read first. */
		memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
		data_size = sizeof(mod_data) + mod_data.data_size;
		break;

	default:
		dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
		break;
	}

	if (data_size) {
		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return;

		memcpy_fromio(data, avs_uplink_addr(adev), data_size);
		trace_avs_msg_payload(data, data_size);
	}

	/* Perform notification-specific operations. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
		adev->ipc->ready = true;
		complete(&adev->fw_ready);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
		avs_dsp_op(adev, log_buffer_status, &msg);
		break;

	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		avs_dsp_exception_caught(adev, &msg);
		break;

	default:
		break;
	}

	kfree(data);
}

void avs_dsp_process_response(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;

	/*
	 * Response may either be solicited - a reply for a request that has
	 * been sent beforehand - or unsolicited (notification).
	 */
	if (avs_msg_is_reply(header)) {
		/* Response processing is invoked from IRQ thread. */
		spin_lock_irq(&ipc->rx_lock);
		avs_dsp_receive_rx(adev, header);
		ipc->rx_completed = true;
		spin_unlock_irq(&ipc->rx_lock);
	} else {
		avs_dsp_process_notification(adev, header);
	}

	complete(&ipc->busy_completion);
}
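
/*
 * Top half of the IPC interrupt: acknowledge DONE (DSP accepted our
 * request) directly and defer BUSY (DSP sent us a message) to the IRQ
 * thread by returning IRQ_WAKE_THREAD.
 */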
irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	struct avs_ipc *ipc = adev->ipc;
	u32 adspis, hipc_rsp, hipc_ack;
	irqreturn_t ret = IRQ_NONE;

	adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS);
	if (adspis == UINT_MAX || !(adspis & AVS_ADSP_ADSPIS_IPC))
		return ret;

	hipc_ack = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCIE);
	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);

	/* DSP acked host's request */
	if (hipc_ack & SKL_ADSP_HIPCIE_DONE) {
		/*
		 * As an extra precaution, mask done interrupt. Code executed
		 * due to complete() found below does not assume any masking.
		 */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_DONE, 0);

		complete(&ipc->done_completion);

		/* tell DSP it has our attention */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCIE,
				      SKL_ADSP_HIPCIE_DONE,
				      SKL_ADSP_HIPCIE_DONE);
		/* unmask done interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_DONE,
				      AVS_ADSP_HIPCCTL_DONE);

		ret = IRQ_HANDLED;
	}

	/* DSP sent new response to process */
	if (hipc_rsp & SKL_ADSP_HIPCT_BUSY) {
		/* mask busy interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_BUSY, 0);

		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	union avs_reply_msg msg;
	u32 hipct, hipcte;

	hipct = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	hipcte = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE);

	/* ensure DSP sent new response to process */
	if (!(hipct & SKL_ADSP_HIPCT_BUSY))
		return IRQ_NONE;

	msg.primary = hipct;
	msg.ext.val = hipcte;
	avs_dsp_process_response(adev, msg.val);

	/* tell DSP we accepted its message */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT,
			      SKL_ADSP_HIPCT_BUSY, SKL_ADSP_HIPCT_BUSY);
	/* unmask busy interrupt */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
			      AVS_ADSP_HIPCCTL_BUSY, AVS_ADSP_HIPCCTL_BUSY);

	return IRQ_HANDLED;
}
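
/* Check whether a response from firmware is pending in HIPCT. */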
static bool avs_ipc_is_busy(struct avs_ipc *ipc)
{
	struct avs_dev *adev = to_avs_dev(ipc->dev);
	u32 hipc_rsp;

	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	return hipc_rsp & SKL_ADSP_HIPCT_BUSY;
}

static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
	u32 repeats_left = 128; /* to avoid infinite looping */
	int ret;

again:
	ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

	/* DSP could be unresponsive at this point. */
	if (!ret) {
		if (!avs_ipc_is_busy(ipc))
			return -ETIMEDOUT;
		/*
		 * Firmware did its job, either notification or reply
		 * has been received - now wait until it's processed.
		 */
		wait_for_completion_killable(&ipc->busy_completion);
	}

	/* Ongoing notification's bottom-half may cause early wakeup */
	spin_lock(&ipc->rx_lock);
	if (!ipc->rx_completed) {
		if (repeats_left) {
			/* Reply delayed due to notification. */
			repeats_left--;
			reinit_completion(&ipc->busy_completion);
			spin_unlock(&ipc->rx_lock);
			goto again;
		}

		spin_unlock(&ipc->rx_lock);
		return -ETIMEDOUT;
	}

	spin_unlock(&ipc->rx_lock);
	return 0;
}

static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
{
	lockdep_assert_held(&ipc->rx_lock);

	ipc->rx.header = 0;
	ipc->rx.size = reply ? reply->size : 0;
	ipc->rx_completed = false;

	reinit_completion(&ipc->done_completion);
	reinit_completion(&ipc->busy_completion);
}
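
/*
 * Fire off a request: the payload goes through the downlink (host -> FW)
 * mailbox, the header through the HIPCI(E) registers. Reading FW registers
 * for tracing is optional since the memory windows may not be valid yet.
 */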
static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
	u64 reg = ULONG_MAX;

	tx->header |= SKL_ADSP_HIPCI_BUSY;
	if (read_fwregs)
		reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));

	trace_avs_request(tx, reg);

	if (tx->size)
		memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX);
}
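
/*
 * Send a request and block until the reply is processed or the wait times
 * out. A timeout is treated like a firmware exception and triggers
 * recovery, as the DSP is assumed unresponsive.
 */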
static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			       struct avs_ipc_msg *reply, int timeout)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	if (!ipc->ready)
		return -EPERM;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, reply);
	avs_dsp_send_tx(adev, request, true);
	spin_unlock(&ipc->rx_lock);

	ret = avs_ipc_wait_busy_completion(ipc, timeout);
	if (ret) {
		if (ret == -ETIMEDOUT) {
			union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);

			/* Same treatment as on exception, just stack_dump=0. */
			avs_dsp_exception_caught(adev, &msg);
		}
		goto exit;
	}

	ret = ipc->rx.rsp.status;
	if (reply) {
		reply->header = ipc->rx.header;
		if (reply->data && ipc->rx.size)
			memcpy(reply->data, ipc->rx.data, reply->size);
	}

exit:
	mutex_unlock(&ipc->msg_mutex);
	return ret;
}
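
/*
 * Wraps the plain send with the D0ix handling: optionally wake the DSP to
 * D0i0 first, then schedule the return to D0ix once the message succeeds.
 *
 * Illustrative call sequence (request encoding is done by the messages.h
 * helpers and is not shown here):
 *
 *	struct avs_ipc_msg request = {0};
 *
 *	request.header = <encoded IPC header>;
 *	ret = avs_dsp_send_msg(adev, &request, NULL);
 */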
static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request,
				     struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
				     bool schedule_d0ix)
{
	int ret;

	trace_avs_d0ix("wake", wake_d0i0, request->header);
	if (wake_d0i0) {
		ret = avs_dsp_wake_d0i0(adev, request);
		if (ret)
			return ret;
	}

	ret = avs_dsp_do_send_msg(adev, request, reply, timeout);
	if (ret)
		return ret;

	trace_avs_d0ix("schedule", schedule_d0ix, request->header);
	if (schedule_d0ix)
		avs_dsp_schedule_d0ix(adev, request);

	return 0;
}

int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
			     struct avs_ipc_msg *reply, int timeout)
{
	bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true);
	bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false);

	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix);
}

int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
		     struct avs_ipc_msg *reply)
{
	return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms);
}

int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
				struct avs_ipc_msg *reply, int timeout, bool wake_d0i0)
{
	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false);
}

int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			struct avs_ipc_msg *reply, bool wake_d0i0)
{
	return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
					   wake_d0i0);
}
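
/*
 * ROM messages are sent while the DSP is still being brought up; only the
 * DONE interrupt confirms delivery and no reply payload is expected.
 */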
static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, NULL);
	/*
	 * with hw still stalled, memory windows may not be
	 * configured properly so avoid accessing SRAM
	 */
	avs_dsp_send_tx(adev, request, false);
	spin_unlock(&ipc->rx_lock);

	/* ROM messages must be sent before main core is unstalled */
	ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
	if (!ret) {
		ret = wait_for_completion_timeout(&ipc->done_completion,
						  msecs_to_jiffies(timeout));
		ret = ret ? 0 : -ETIMEDOUT;
	}

	mutex_unlock(&ipc->msg_mutex);

	return ret;
}

int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
	return avs_dsp_do_send_rom_msg(adev, request, timeout);
}

int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request)
{
	return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms);
}

void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
{
	u32 value, mask;

	/*
	 * No particular bit setting order. All of these are required
	 * to have a functional SW <-> FW communication.
	 */
	value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);

	mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
	value = enable ? mask : 0;
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, mask, value);
}
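
/* One-time setup of the IPC context; the rx buffer is device-managed. */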
int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
{
	ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!ipc->rx.data)
		return -ENOMEM;

	ipc->dev = dev;
	ipc->ready = false;
	ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
	INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
	INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
	init_completion(&ipc->done_completion);
	init_completion(&ipc->busy_completion);
	spin_lock_init(&ipc->rx_lock);
	mutex_init(&ipc->msg_mutex);

	return 0;
}

void avs_ipc_block(struct avs_ipc *ipc)
{
	ipc->ready = false;
	cancel_work_sync(&ipc->recovery_work);
	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
}