// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message SMC/HVC
 * Transport driver
 *
 * Copyright 2020 NXP
 */
9 #include <linux/arm-smccc.h>
10 #include <linux/atomic.h>
11 #include <linux/device.h>
12 #include <linux/err.h>
13 #include <linux/interrupt.h>
14 #include <linux/mutex.h>
16 #include <linux/of_address.h>
17 #include <linux/of_irq.h>
18 #include <linux/limits.h>
19 #include <linux/processor.h>
20 #include <linux/slab.h>
/*
 * The shmem address is split into 4K page and offset.
 * This is to make sure the parameters fit in 32bit arguments of the
 * smc/hvc call to keep it uniform across smc32/smc64 conventions.
 * This however limits the shmem address to 44 bit.
 *
 * These optional parameters can be used to distinguish among multiple
 * scmi instances that are using the same smc-id.
 * The page parameter is passed in r1/x1/w1 register and the offset parameter
 * is passed in r2/x2/w2 register.
 */
36 #define SHMEM_SIZE (SZ_4K)
37 #define SHMEM_SHIFT 12
38 #define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT))
39 #define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1))
42 * struct scmi_smc - Structure representing a SCMI smc transport
44 * @irq: An optional IRQ for completion
45 * @cinfo: SCMI channel info
46 * @shmem: Transmit/Receive shared memory area
47 * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
48 * Used when NOT operating in atomic mode.
49 * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
50 * Used when operating in atomic mode.
51 * @func_id: smc/hvc call function id
52 * @param_page: 4K page number of the shmem channel
53 * @param_offset: Offset within the 4K page of the shmem channel
54 * @cap_id: smc/hvc doorbell's capability id to be used on Qualcomm virtual
60 struct scmi_chan_info *cinfo;
61 struct scmi_shared_mem __iomem *shmem;
62 /* Protect access to shmem area */
63 struct mutex shmem_lock;
64 #define INFLIGHT_NONE MSG_TOKEN_MAX
66 unsigned long func_id;
67 unsigned long param_page;
68 unsigned long param_offset;
72 static irqreturn_t smc_msg_done_isr(int irq, void *data)
74 struct scmi_smc *scmi_info = data;
76 scmi_rx_callback(scmi_info->cinfo,
77 shmem_read_header(scmi_info->shmem), NULL);
82 static bool smc_chan_available(struct device_node *of_node, int idx)
84 struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
92 static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
94 if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
95 atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
97 mutex_init(&scmi_info->shmem_lock);
100 static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
104 ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);
106 return ret == INFLIGHT_NONE;
110 smc_channel_lock_acquire(struct scmi_smc *scmi_info,
111 struct scmi_xfer *xfer __maybe_unused)
113 if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
114 spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
116 mutex_lock(&scmi_info->shmem_lock);
119 static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
121 if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
122 atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
124 mutex_unlock(&scmi_info->shmem_lock);
127 static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
130 struct device *cdev = cinfo->dev;
131 unsigned long cap_id = ULONG_MAX;
132 struct scmi_smc *scmi_info;
133 resource_size_t size;
135 struct device_node *np;
142 scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
146 np = of_parse_phandle(cdev->of_node, "shmem", 0);
147 if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
152 ret = of_address_to_resource(np, 0, &res);
155 dev_err(cdev, "failed to get SCMI Tx shared memory\n");
159 size = resource_size(&res);
160 scmi_info->shmem = devm_ioremap(dev, res.start, size);
161 if (!scmi_info->shmem) {
162 dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
163 return -EADDRNOTAVAIL;
166 ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
170 if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) {
171 void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8;
172 /* The capability-id is kept in last 8 bytes of shmem.
175 * +-------+ <-- size - 8
179 memcpy_fromio(&cap_id, ptr, sizeof(cap_id));
182 if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) {
183 scmi_info->param_page = SHMEM_PAGE(res.start);
184 scmi_info->param_offset = SHMEM_OFFSET(res.start);
187 * If there is an interrupt named "a2p", then the service and
188 * completion of a message is signaled by an interrupt rather than by
189 * the return of the SMC call.
191 scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
192 if (scmi_info->irq > 0) {
193 ret = request_irq(scmi_info->irq, smc_msg_done_isr,
194 IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
196 dev_err(dev, "failed to setup SCMI smc irq\n");
200 cinfo->no_completion_irq = true;
203 scmi_info->func_id = func_id;
204 scmi_info->cap_id = cap_id;
205 scmi_info->cinfo = cinfo;
206 smc_channel_lock_init(scmi_info);
207 cinfo->transport_info = scmi_info;
212 static int smc_chan_free(int id, void *p, void *data)
214 struct scmi_chan_info *cinfo = p;
215 struct scmi_smc *scmi_info = cinfo->transport_info;
217 /* Ignore any possible further reception on the IRQ path */
218 if (scmi_info->irq > 0)
219 free_irq(scmi_info->irq, scmi_info);
221 cinfo->transport_info = NULL;
222 scmi_info->cinfo = NULL;
227 static int smc_send_message(struct scmi_chan_info *cinfo,
228 struct scmi_xfer *xfer)
230 struct scmi_smc *scmi_info = cinfo->transport_info;
231 struct arm_smccc_res res;
234 * Channel will be released only once response has been
235 * surely fully retrieved, so after .mark_txdone()
237 smc_channel_lock_acquire(scmi_info, xfer);
239 shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);
241 if (scmi_info->cap_id != ULONG_MAX)
242 arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0,
243 0, 0, 0, 0, 0, &res);
245 arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->param_page,
246 scmi_info->param_offset, 0, 0, 0, 0, 0,
249 /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
251 smc_channel_lock_release(scmi_info);
258 static void smc_fetch_response(struct scmi_chan_info *cinfo,
259 struct scmi_xfer *xfer)
261 struct scmi_smc *scmi_info = cinfo->transport_info;
263 shmem_fetch_response(scmi_info->shmem, xfer);
266 static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
267 struct scmi_xfer *__unused)
269 struct scmi_smc *scmi_info = cinfo->transport_info;
271 smc_channel_lock_release(scmi_info);
274 static const struct scmi_transport_ops scmi_smc_ops = {
275 .chan_available = smc_chan_available,
276 .chan_setup = smc_chan_setup,
277 .chan_free = smc_chan_free,
278 .send_message = smc_send_message,
279 .mark_txdone = smc_mark_txdone,
280 .fetch_response = smc_fetch_response,
283 const struct scmi_desc scmi_smc_desc = {
284 .ops = &scmi_smc_ops,
285 .max_rx_timeout_ms = 30,
289 * Setting .sync_cmds_atomic_replies to true for SMC assumes that,
290 * once the SMC instruction has completed successfully, the issued
291 * SCMI command would have been already fully processed by the SCMI
292 * platform firmware and so any possible response value expected
293 * for the issued command will be immmediately ready to be fetched
294 * from the shared memory area.
296 .sync_cmds_completed_on_ret = true,
297 .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),