/*
 * Copyright (c) 2015-2016 Quantenna Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/types.h>
#include <linux/io.h>

#include "shm_ipc.h"
23 #define pr_fmt(fmt) "qtnfmac shm_ipc: %s: " fmt, __func__
25 static bool qtnf_shm_ipc_has_new_data(struct qtnf_shm_ipc *ipc)
27 const u32 flags = readl(&ipc->shm_region->headroom.hdr.flags);
29 return (flags & QTNF_SHM_IPC_NEW_DATA);
32 static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
35 bool rx_buff_ok = true;
36 struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;
38 shm_reg_hdr = &ipc->shm_region->headroom.hdr;
40 size = readw(&shm_reg_hdr->data_len);
42 if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
43 pr_err("wrong rx packet size: %zu\n", size);
46 memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size);
49 writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
50 readl(&shm_reg_hdr->flags); /* flush PCIe write */
52 ipc->interrupt.fn(ipc->interrupt.arg);
54 if (likely(rx_buff_ok)) {
55 ipc->rx_packet_count++;
56 ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size);
60 static void qtnf_shm_ipc_irq_work(struct work_struct *work)
62 struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc,
65 while (qtnf_shm_ipc_has_new_data(ipc))
66 qtnf_shm_handle_new_data(ipc);
69 static void qtnf_shm_ipc_irq_inbound_handler(struct qtnf_shm_ipc *ipc)
73 flags = readl(&ipc->shm_region->headroom.hdr.flags);
75 if (flags & QTNF_SHM_IPC_NEW_DATA)
76 queue_work(ipc->workqueue, &ipc->irq_work);
79 static void qtnf_shm_ipc_irq_outbound_handler(struct qtnf_shm_ipc *ipc)
83 if (!READ_ONCE(ipc->waiting_for_ack))
86 flags = readl(&ipc->shm_region->headroom.hdr.flags);
88 if (flags & QTNF_SHM_IPC_ACK) {
89 WRITE_ONCE(ipc->waiting_for_ack, 0);
90 complete(&ipc->tx_completion);
94 int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc,
95 enum qtnf_shm_ipc_direction direction,
96 struct qtnf_shm_ipc_region __iomem *shm_region,
97 struct workqueue_struct *workqueue,
98 const struct qtnf_shm_ipc_int *interrupt,
99 const struct qtnf_shm_ipc_rx_callback *rx_callback)
101 BUILD_BUG_ON(offsetof(struct qtnf_shm_ipc_region, data) !=
103 BUILD_BUG_ON(sizeof(struct qtnf_shm_ipc_region) > QTN_IPC_REG_SZ);
105 ipc->shm_region = shm_region;
106 ipc->direction = direction;
107 ipc->interrupt = *interrupt;
108 ipc->rx_callback = *rx_callback;
109 ipc->tx_packet_count = 0;
110 ipc->rx_packet_count = 0;
111 ipc->workqueue = workqueue;
112 ipc->waiting_for_ack = 0;
113 ipc->tx_timeout_count = 0;
116 case QTNF_SHM_IPC_OUTBOUND:
117 ipc->irq_handler = qtnf_shm_ipc_irq_outbound_handler;
119 case QTNF_SHM_IPC_INBOUND:
120 ipc->irq_handler = qtnf_shm_ipc_irq_inbound_handler;
126 INIT_WORK(&ipc->irq_work, qtnf_shm_ipc_irq_work);
127 init_completion(&ipc->tx_completion);
132 void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc)
134 complete_all(&ipc->tx_completion);
137 int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size)
140 struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;
142 shm_reg_hdr = &ipc->shm_region->headroom.hdr;
144 if (unlikely(size > QTN_IPC_MAX_DATA_SZ))
147 ipc->tx_packet_count++;
149 writew(size, &shm_reg_hdr->data_len);
150 memcpy_toio(ipc->shm_region->data, buf, size);
152 /* sync previous writes before proceeding */
155 WRITE_ONCE(ipc->waiting_for_ack, 1);
157 /* sync previous memory write before announcing new data ready */
160 writel(QTNF_SHM_IPC_NEW_DATA, &shm_reg_hdr->flags);
161 readl(&shm_reg_hdr->flags); /* flush PCIe write */
163 ipc->interrupt.fn(ipc->interrupt.arg);
165 if (!wait_for_completion_timeout(&ipc->tx_completion,
166 QTN_SHM_IPC_ACK_TIMEOUT)) {
168 ipc->tx_timeout_count++;
169 pr_err("TX ACK timeout\n");
172 /* now we're not waiting for ACK even in case of timeout */
173 WRITE_ONCE(ipc->waiting_for_ack, 0);