1 // SPDX-License-Identifier: GPL-2.0-only
/*
3 * Copyright (C) 2020-21 Intel Corporation.
*/
6 #include <linux/delay.h>
8 #include "iosm_ipc_chnl_cfg.h"
9 #include "iosm_ipc_devlink.h"
10 #include "iosm_ipc_imem.h"
11 #include "iosm_ipc_imem_ops.h"
12 #include "iosm_ipc_port.h"
13 #include "iosm_ipc_task_queue.h"
15 /* Open a packet data online channel between the network layer and CP. */
/* Returns the MUX session handle/status from ipc_mux_open_session() when
 * the modem is in the runtime phase; the refuse path's return statement is
 * not visible in this excerpt.
 */
16 int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
18 dev_dbg(ipc_imem->dev, "%s if id: %d",
19 ipc_imem_phase_get_string(ipc_imem->phase), if_id);
21 /* The network interface is only supported in the runtime phase. */
22 if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
23 dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
24 ipc_imem_phase_get_string(ipc_imem->phase));
/* Runtime phase confirmed: open the IP MUX session for this interface. */
28 return ipc_mux_open_session(ipc_imem->mux, if_id);
31 /* Release a net link to CP. */
32 void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
/* Close the MUX session only when the MUX instance exists and if_id lies
 * inside the valid IP session id range; otherwise this is a silent no-op.
 */
35 if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
36 if_id <= IP_MUX_SESSION_END)
37 ipc_mux_close_session(ipc_imem->mux, if_id);
40 /* Tasklet call to do uplink transfer. */
/* Task-queue callback: arg/msg/size appear unused in the visible body and
 * seem to exist only to satisfy the task queue callback signature —
 * TODO(review) confirm against the hidden remainder of the body.
 */
41 static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
42 void *msg, size_t size)
44 ipc_imem_ul_send(ipc_imem);
49 /* Through tasklet to do sio write. */
/* Defers the actual uplink send (ipc_imem_tq_cdev_write) to the IPC
 * tasklet context via the task queue; returns its queuing status.
 */
50 static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
52 return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
56 /* Function for transfer UL data */
/* Encodes one uplink skb through the IP MUX layer. Rejects a NULL imem
 * instance or a negative channel id, and refuses transmission outside the
 * runtime phase (error return values are not visible in this excerpt).
 */
57 int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
58 int if_id, int channel_id, struct sk_buff *skb)
/* Validate arguments before touching any device state. */
62 if (!ipc_imem || channel_id < 0)
/* UL data is only accepted while CP is running. */
66 if (ipc_imem->phase != IPC_P_RUN) {
67 dev_dbg(ipc_imem->dev, "phase %s transmit",
68 ipc_imem_phase_get_string(ipc_imem->phase));
73 /* Route the UL packet through IP MUX Layer */
74 ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
79 /* Initialize wwan channel */
/* Reads the CP version from MMIO, configures the WWAN channel (with larger
 * TD rings/DL buffers when aggregation MUX is in use on the first IP
 * channel) and registers the wwan net interface. Error return values for
 * the failure paths are not visible in this excerpt.
 */
80 int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
81 enum ipc_mux_protocol mux_type)
83 struct ipc_chnl_cfg chnl_cfg = { 0 };
85 ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
87 /* If modem version is invalid (0xffffffff), do not initialize WWAN. */
88 if (ipc_imem->cp_version == -1) {
89 dev_err(ipc_imem->dev, "invalid CP version");
/* Fetch the default channel configuration for the next free channel. */
93 ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
/* Aggregation MUX on the first IP channel needs bigger TD rings and a
 * larger DL (ADB) buffer than the defaults.
 */
95 if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
96 ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
97 chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
98 chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
99 chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
102 ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
105 /* WWAN registration. */
106 ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
107 if (!ipc_imem->wwan) {
108 dev_err(ipc_imem->dev,
109 "failed to register the ipc_wwan interfaces");
116 /* Map SKB to DMA for transfer */
/* Maps skb->data for device-bound DMA and stashes the mapping metadata in
 * the skb control buffer so the completion path can unmap it later.
 */
117 static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
120 struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
121 char *buf = skb->data;
126 ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
/* Compile-time guarantee: the IPC control block fits inside skb->cb. */
131 BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
/* Record mapping, direction and length for the later unmap. */
133 IPC_CB(skb)->mapping = mapping;
134 IPC_CB(skb)->direction = DMA_TO_DEVICE;
135 IPC_CB(skb)->len = len;
136 IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
142 /* return true if channel is ready for use */
/* Checks channel readiness against the current operation phase. The
 * phase-dispatch construct (likely a switch on 'phase') is not visible in
 * this excerpt; only individual case bodies remain — TODO(review) confirm
 * the dispatch structure against the full file.
 */
143 static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
144 struct ipc_mem_channel *channel)
146 enum ipc_phase phase;
148 /* Update the current operation phase. */
149 phase = ipc_imem->phase;
151 /* Select the operation depending on the execution stage. */
159 /* Prepare the PSI image for the CP ROM driver and
160 * suspend the flash app.
*/
/* In this stage the channel must still be in the RESERVED state. */
162 if (channel->state != IMEM_CHANNEL_RESERVED) {
163 dev_err(ipc_imem->dev,
164 "ch[%d]:invalid channel state %d,expected %d",
165 channel->channel_id, channel->state,
166 IMEM_CHANNEL_RESERVED);
167 goto channel_unavailable;
169 goto channel_available;
172 /* Ignore uplink actions in all other phases. */
173 dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
174 channel->channel_id, phase);
175 goto channel_unavailable;
177 /* Check the full availability of the channel. */
178 if (channel->state != IMEM_CHANNEL_ACTIVE) {
179 dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
180 channel->channel_id, channel->state);
181 goto channel_unavailable;
192 * ipc_imem_sys_port_close - Release a sio link to CP.
193 * @ipc_imem: Imem instance.
194 * @channel: Channel instance.
/* Waits (bounded) for pending UL and DL transfer descriptors to drain,
 * then marks the channel CLOSING, closes both pipes and frees the channel.
 */
196 void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
197 struct ipc_mem_channel *channel)
199 enum ipc_phase curr_phase;
203 curr_phase = ipc_imem->phase;
205 /* If current phase is IPC_P_OFF or SIO ID is -ve then
206 * channel is already freed. Nothing to do.
*/
208 if (curr_phase == IPC_P_OFF) {
209 dev_err(ipc_imem->dev,
210 "nothing to do. Current Phase: %s",
211 ipc_imem_phase_get_string(curr_phase));
/* A FREE channel has nothing left to close. */
215 if (channel->state == IMEM_CHANNEL_FREE) {
216 dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
217 channel->channel_id, channel->state);
221 /* If there are any pending TDs then wait for Timeout/Completion before
 * closing the pipe (UL direction: head != tail means TDs in flight).
 */
224 if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
225 ipc_imem->app_notify_ul_pend = 1;
227 /* Suspend the user app and wait a certain time for processing
 * of the pending UL data.
 */
230 status = wait_for_completion_interruptible_timeout
231 (&ipc_imem->ul_pend_sem,
232 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
234 dev_dbg(ipc_imem->dev,
235 "Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
236 channel->ul_pipe.pipe_nr,
237 channel->ul_pipe.old_head,
238 channel->ul_pipe.old_tail);
241 ipc_imem->app_notify_ul_pend = 0;
244 /* If there are any pending TDs then wait for Timeout/Completion before
 * closing the pipe (DL direction: compare protocol tail with old_tail).
 */
247 ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
248 &channel->dl_pipe, NULL, &tail);
250 if (tail != channel->dl_pipe.old_tail) {
251 ipc_imem->app_notify_dl_pend = 1;
253 /* Suspend the user app and wait a certain time for processing
 * of the pending DL data.
 */
256 status = wait_for_completion_interruptible_timeout
257 (&ipc_imem->dl_pend_sem,
258 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
260 dev_dbg(ipc_imem->dev,
261 "Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
262 channel->dl_pipe.pipe_nr,
263 channel->dl_pipe.old_head,
264 channel->dl_pipe.old_tail);
267 ipc_imem->app_notify_dl_pend = 0;
270 /* Due to wait for completion in messages, there is a small window
271 * between closing the pipe and updating the channel is closed. In this
272 * small window there could be HP update from Host Driver. Hence update
273 * the channel state as CLOSING to avoid unnecessary interrupt
*/
276 channel->state = IMEM_CHANNEL_CLOSING;
278 ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
279 ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
281 ipc_imem_channel_free(channel);
284 /* Open a PORT link to CP and return the channel */
/* Allocates a control channel id for chl_id and opens it with the given
 * head-pointer doorbell id; only allowed in the runtime phase. Failure
 * return statements (presumably NULL) are not visible in this excerpt.
 */
285 struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
286 int chl_id, int hp_id)
288 struct ipc_mem_channel *channel;
291 /* The PORT interface is only supported in the runtime phase. */
292 if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
293 dev_err(ipc_imem->dev, "PORT open refused, phase %s",
294 ipc_imem_phase_get_string(ipc_imem->phase));
/* Reserve a control channel id for this port. */
298 ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
301 dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
/* Open the reserved channel with the given HP doorbell id. */
305 channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
308 dev_err(ipc_imem->dev, "PORT channel id open failed");
315 /* transfer skb to modem */
/* Maps the skb for DMA, queues it on the channel's UL list and kicks the
 * tasklet; on a queuing failure the skb is dequeued again (caller
 * presumably frees it — TODO(review) confirm ownership on error).
 */
316 int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
318 struct ipc_mem_channel *channel = ipc_cdev->channel;
319 struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
/* Refuse the write when the channel is not active or shutdown started. */
322 if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
323 ipc_imem->phase == IPC_P_OFF_REQ
326 ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
331 /* Add skb to the uplink skbuf accumulator. */
332 skb_queue_tail(&channel->ul_list, skb);
334 ret = ipc_imem_call_cdev_write(ipc_imem);
/* Queuing the tasklet failed: undo the enqueue. */
337 skb_dequeue_tail(&channel->ul_list);
338 dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
339 ipc_cdev->channel->channel_id);
345 /* Open a SIO link to CP and return the channel instance */
/* Reserves the flash (devlink) control channel, triggers the chip-info
 * read and, once the CP version is valid, opens the channel. Returns the
 * opened channel; NULL-return paths are not fully visible in this excerpt.
 */
346 struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
348 struct ipc_mem_channel *channel;
349 enum ipc_phase phase;
352 phase = ipc_imem_phase_update(ipc_imem);
356 /* Get a channel id as flash id and reserve it. */
357 channel_id = ipc_imem_channel_alloc(ipc_imem,
358 IPC_MEM_CTRL_CHL_ID_7,
361 if (channel_id < 0) {
362 dev_err(ipc_imem->dev,
363 "reservation of a flash channel id failed");
/* Remember the devlink channel id and resolve the channel struct. */
367 ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
368 channel = &ipc_imem->channels[channel_id];
370 /* Enqueue chip info data to be read */
371 if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
372 dev_err(ipc_imem->dev, "Enqueue of chip info failed");
373 channel->state = IMEM_CHANNEL_FREE;
/* A CP version of 0xffffffff (-1) means the modem is not ready. */
381 ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
382 if (ipc_imem->cp_version == -1) {
383 dev_err(ipc_imem->dev, "invalid CP version");
387 channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
388 return ipc_imem_channel_open(ipc_imem, channel_id,
392 /* CP is in the wrong state (e.g. CRASH or CD_READY) */
393 dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
399 /* Release a SIO channel link to CP. */
/* Polls the exec stage until CP reaches RUN/PSI or boot_check_timeout
 * elapses, drains pending UL/DL TDs with bounded waits, then marks the
 * channel CLOSING, cleans up both pipes and releases the channel count.
 */
400 void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
402 struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
403 int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
404 enum ipc_mem_exec_stage exec_stage;
405 struct ipc_mem_channel *channel;
409 channel = ipc_imem->ipc_devlink->devlink_sio.channel;
410 /* Increase the total wait time to boot_check_timeout */
/* Poll loop: stop early once CP reports RUN or PSI; each iteration
 * consumes 20 (presumably ms — the delay call is not visible here).
 */
412 exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
413 if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
414 exec_stage == IPC_MEM_EXEC_STAGE_PSI)
417 boot_check_timeout -= 20;
418 } while (boot_check_timeout > 0);
420 /* If there are any pending TDs then wait for Timeout/Completion before
 * closing the pipe (UL direction).
 */
423 if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
424 status = wait_for_completion_interruptible_timeout
425 (&ipc_imem->ul_pend_sem,
426 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
428 dev_dbg(ipc_imem->dev,
429 "Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
430 channel->ul_pipe.pipe_nr,
431 channel->ul_pipe.old_head,
432 channel->ul_pipe.old_tail);
/* Same bounded wait for the DL direction. */
436 ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
437 &channel->dl_pipe, NULL, &tail);
439 if (tail != channel->dl_pipe.old_tail) {
440 status = wait_for_completion_interruptible_timeout
441 (&ipc_imem->dl_pend_sem,
442 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
444 dev_dbg(ipc_imem->dev,
445 "Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
446 channel->dl_pipe.pipe_nr,
447 channel->dl_pipe.old_head,
448 channel->dl_pipe.old_tail);
452 /* Due to wait for completion in messages, there is a small window
453 * between closing the pipe and updating the channel is closed. In this
454 * small window there could be HP update from Host Driver. Hence update
455 * the channel state as CLOSING to avoid unnecessary interrupt
*/
458 channel->state = IMEM_CHANNEL_CLOSING;
459 /* Release the pipe resources */
460 ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
461 ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
462 ipc_imem->nr_of_channels--;
/* Hand a received skb to the devlink reader: queue it on the RX list and
 * wake any reader blocked on read_sem (see ipc_imem_sys_devlink_read).
 */
465 void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
468 skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
469 complete(&ipc_devlink->devlink_sio.read_sem);
/* Transfer the PSI image to CP: map the buffer for DMA, publish its
 * address/size via MMIO, ring the BOOT doorbell, wait for CP confirmation,
 * poll for the PSI exec stage, bring IPC to RUNNING and finally open the
 * flash (devlink) channel. Uses goto-based cleanup; the unmap at the end
 * runs on the failure path(s) not fully visible in this excerpt.
 */
473 static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
474 struct ipc_mem_channel *channel,
475 unsigned char *buf, int count)
477 int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
478 enum ipc_mem_exec_stage exec_stage;
480 dma_addr_t mapping = 0;
483 ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
486 goto pcie_addr_map_fail;
488 /* Save the PSI information for the CP ROM driver on the doorbell
 * scratchpad, then fire the BOOT doorbell to start the transfer.
 */
491 ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
492 ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
494 ret = wait_for_completion_interruptible_timeout
496 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
499 dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
501 goto psi_transfer_fail;
503 /* If the PSI download fails, return the CP boot ROM exit code */
504 if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
505 ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
506 ret = (-1) * ((int)ipc_imem->rom_exit_code);
507 goto psi_transfer_fail;
510 dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
512 /* Wait psi_start_timeout milliseconds until the CP PSI image is
513 * running and updates the execution_stage field with
514 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
*/
517 exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
519 if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
523 psi_start_timeout -= 20;
524 } while (psi_start_timeout > 0);
526 if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
527 goto psi_transfer_fail; /* Unknown status of CP PSI process. */
529 ipc_imem->phase = IPC_P_PSI;
531 /* Enter the PSI phase. */
532 dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
534 /* Request the RUNNING state from CP and wait until it was reached
 * or the transfer timeout elapsed.
 */
537 ipc_imem_ipc_init_check(ipc_imem);
539 ret = wait_for_completion_interruptible_timeout
540 (&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
542 dev_err(ipc_imem->dev,
543 "Failed PSI RUNNING state on CP, Error-%d", ret);
544 goto psi_transfer_fail;
/* Double-check the device IPC state actually reached RUNNING. */
547 if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
548 IPC_MEM_DEVICE_IPC_RUNNING) {
549 dev_err(ipc_imem->dev,
550 "ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
552 ipc_imem_phase_get_string(ipc_imem->phase),
553 ipc_mmio_get_ipc_state(ipc_imem->mmio));
555 goto psi_transfer_fail;
558 /* Create the flash channel for the transfer of the images. */
559 if (!ipc_imem_sys_devlink_open(ipc_imem)) {
560 dev_err(ipc_imem->dev, "can't open flash_channel");
561 goto psi_transfer_fail;
/* Release the DMA mapping of the PSI buffer. */
566 ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
/* Write a devlink (flash) buffer to CP. In the ROM phase the buffer is the
 * PSI image and goes through ipc_imem_sys_psi_transfer(); otherwise it is
 * copied into an skb, queued on the channel UL list and the tasklet waits
 * for CP confirmation. On PSI failure an inband crash signal is sent.
 */
571 int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
572 unsigned char *buf, int count)
574 struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
575 struct ipc_mem_channel *channel;
580 channel = ipc_imem->ipc_devlink->devlink_sio.channel;
582 /* In the ROM phase the PSI image is passed to CP about a specific
583 * shared memory area and doorbell scratchpad directly.
*/
585 if (ipc_imem->phase == IPC_P_ROM) {
586 ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
587 /* If the PSI transfer fails then send crash
 * signal to CP (condition check not visible in this excerpt).
 */
591 ipc_imem_msg_send_feature_set(ipc_imem,
592 IPC_MEM_INBAND_CRASH_SIG,
597 /* Allocate skb memory for the uplink buffer. */
598 skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
/* Copy the caller's buffer and mark the op as a blocking user write. */
605 skb_put_data(skb, buf, count);
607 IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
609 /* Add skb to the uplink skbuf accumulator. */
610 skb_queue_tail(&channel->ul_list, skb);
612 /* Inform the IPC tasklet to pass uplink IP packets to CP. */
613 if (!ipc_imem_call_cdev_write(ipc_imem)) {
614 ret = wait_for_completion_interruptible(&channel->ul_sem);
/* No confirmation from CP: free the skb and report the error. */
617 dev_err(ipc_imem->dev,
618 "ch[%d] no CP confirmation, status = %d",
619 channel->channel_id, ret);
620 ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
629 int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
630 u32 bytes_to_read, u32 *bytes_read)
632 struct sk_buff *skb = NULL;
635 /* check skb is available in rx_list or wait for skb */
636 devlink->devlink_sio.devlink_read_pend = 1;
637 while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
638 if (!wait_for_completion_interruptible_timeout
639 (&devlink->devlink_sio.read_sem,
640 msecs_to_jiffies(IPC_READ_TIMEOUT))) {
641 dev_err(devlink->dev, "Read timedout");
643 goto devlink_read_fail;
646 devlink->devlink_sio.devlink_read_pend = 0;
647 if (bytes_to_read < skb->len) {
648 dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
650 goto devlink_read_fail;
652 *bytes_read = skb->len;
653 memcpy(data, skb->data, skb->len);