GNU Linux-libre 6.7.9-gnu
[releases.git] / drivers / firmware / arm_scmi / smc.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Message SMC/HVC
4  * Transport driver
5  *
6  * Copyright 2020 NXP
7  */
8
9 #include <linux/arm-smccc.h>
10 #include <linux/atomic.h>
11 #include <linux/device.h>
12 #include <linux/err.h>
13 #include <linux/interrupt.h>
14 #include <linux/mutex.h>
15 #include <linux/of.h>
16 #include <linux/of_address.h>
17 #include <linux/of_irq.h>
18 #include <linux/limits.h>
19 #include <linux/processor.h>
20 #include <linux/slab.h>
21
22 #include "common.h"
23
24 /*
25  * The shmem address is split into 4K page and offset.
26  * This is to make sure the parameters fit in 32bit arguments of the
27  * smc/hvc call to keep it uniform across smc32/smc64 conventions.
28  * This however limits the shmem address to 44 bit.
29  *
30  * These optional parameters can be used to distinguish among multiple
31  * scmi instances that are using the same smc-id.
32  * The page parameter is passed in r1/x1/w1 register and the offset parameter
33  * is passed in r2/x2/w2 register.
34  */
35
/* Shmem channel size and the split of its address into page/offset */
#define SHMEM_SIZE (SZ_4K)
#define SHMEM_SHIFT 12
#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT))	/* 4K page number */
#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1))	/* offset within page */
40
41 /**
42  * struct scmi_smc - Structure representing a SCMI smc transport
43  *
44  * @irq: An optional IRQ for completion
45  * @cinfo: SCMI channel info
46  * @shmem: Transmit/Receive shared memory area
47  * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
48  *              Used when NOT operating in atomic mode.
49  * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
50  *            Used when operating in atomic mode.
51  * @func_id: smc/hvc call function id
52  * @param_page: 4K page number of the shmem channel
53  * @param_offset: Offset within the 4K page of the shmem channel
54  * @cap_id: smc/hvc doorbell's capability id to be used on Qualcomm virtual
55  *          platforms
56  */
57
struct scmi_smc {
        int irq;
        struct scmi_chan_info *cinfo;
        struct scmi_shared_mem __iomem *shmem;
        /* Protect access to shmem area */
        struct mutex shmem_lock;
/* Sentinel: no transfer currently owns the channel (atomic mode only) */
#define INFLIGHT_NONE   MSG_TOKEN_MAX
        atomic_t inflight;
        unsigned long func_id;
        unsigned long param_page;
        unsigned long param_offset;
        unsigned long cap_id;
};
71
72 static irqreturn_t smc_msg_done_isr(int irq, void *data)
73 {
74         struct scmi_smc *scmi_info = data;
75
76         scmi_rx_callback(scmi_info->cinfo,
77                          shmem_read_header(scmi_info->shmem), NULL);
78
79         return IRQ_HANDLED;
80 }
81
82 static bool smc_chan_available(struct device_node *of_node, int idx)
83 {
84         struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
85         if (!np)
86                 return false;
87
88         of_node_put(np);
89         return true;
90 }
91
92 static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
93 {
94         if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
95                 atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
96         else
97                 mutex_init(&scmi_info->shmem_lock);
98 }
99
100 static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
101 {
102         int ret;
103
104         ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);
105
106         return ret == INFLIGHT_NONE;
107 }
108
109 static inline void
110 smc_channel_lock_acquire(struct scmi_smc *scmi_info,
111                          struct scmi_xfer *xfer __maybe_unused)
112 {
113         if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
114                 spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
115         else
116                 mutex_lock(&scmi_info->shmem_lock);
117 }
118
119 static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
120 {
121         if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
122                 atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
123         else
124                 mutex_unlock(&scmi_info->shmem_lock);
125 }
126
/*
 * smc_chan_setup() - Set up an SCMI channel over the SMC/HVC transport.
 *
 * Resolves the "shmem" phandle, maps the shared memory area, reads the
 * mandatory "arm,smc-id" function id and wires up the optional "a2p"
 * completion interrupt. Only Tx channels are supported by this transport.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
                          bool tx)
{
        struct device *cdev = cinfo->dev;
        unsigned long cap_id = ULONG_MAX;	/* ULONG_MAX == no capability-id */
        struct scmi_smc *scmi_info;
        resource_size_t size;
        struct resource res;
        struct device_node *np;
        u32 func_id;
        int ret;

        /* No dedicated Rx channel exists for this transport */
        if (!tx)
                return -ENODEV;

        scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
        if (!scmi_info)
                return -ENOMEM;

        /* Locate and map the shared memory area described by "shmem" */
        np = of_parse_phandle(cdev->of_node, "shmem", 0);
        if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
                of_node_put(np);
                return -ENXIO;
        }

        ret = of_address_to_resource(np, 0, &res);
        of_node_put(np);
        if (ret) {
                dev_err(cdev, "failed to get SCMI Tx shared memory\n");
                return ret;
        }

        size = resource_size(&res);
        scmi_info->shmem = devm_ioremap(dev, res.start, size);
        if (!scmi_info->shmem) {
                dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
                return -EADDRNOTAVAIL;
        }

        ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
        if (ret < 0)
                return ret;

        if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) {
                void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8;
                /* The capability-id is kept in last 8 bytes of shmem.
                 *     +-------+ <-- 0
                 *     | shmem |
                 *     +-------+ <-- size - 8
                 *     | capId |
                 *     +-------+ <-- size
                 *
                 * NOTE(review): assumes size >= 8; also, on 32-bit builds
                 * sizeof(cap_id) is 4, so only the first 4 bytes of the
                 * capId slot are read — TODO confirm this is intended on
                 * such platforms.
                 */
                memcpy_fromio(&cap_id, ptr, sizeof(cap_id));
        }

        /* Optional page/offset lets instances share a single smc-id */
        if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) {
                scmi_info->param_page = SHMEM_PAGE(res.start);
                scmi_info->param_offset = SHMEM_OFFSET(res.start);
        }
        /*
         * If there is an interrupt named "a2p", then the service and
         * completion of a message is signaled by an interrupt rather than by
         * the return of the SMC call.
         */
        scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
        if (scmi_info->irq > 0) {
                ret = request_irq(scmi_info->irq, smc_msg_done_isr,
                                  IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
                if (ret) {
                        dev_err(dev, "failed to setup SCMI smc irq\n");
                        return ret;
                }
        } else {
                cinfo->no_completion_irq = true;
        }

        scmi_info->func_id = func_id;
        scmi_info->cap_id = cap_id;
        scmi_info->cinfo = cinfo;
        smc_channel_lock_init(scmi_info);
        cinfo->transport_info = scmi_info;

        return 0;
}
211
212 static int smc_chan_free(int id, void *p, void *data)
213 {
214         struct scmi_chan_info *cinfo = p;
215         struct scmi_smc *scmi_info = cinfo->transport_info;
216
217         /* Ignore any possible further reception on the IRQ path */
218         if (scmi_info->irq > 0)
219                 free_irq(scmi_info->irq, scmi_info);
220
221         cinfo->transport_info = NULL;
222         scmi_info->cinfo = NULL;
223
224         return 0;
225 }
226
227 static int smc_send_message(struct scmi_chan_info *cinfo,
228                             struct scmi_xfer *xfer)
229 {
230         struct scmi_smc *scmi_info = cinfo->transport_info;
231         struct arm_smccc_res res;
232
233         /*
234          * Channel will be released only once response has been
235          * surely fully retrieved, so after .mark_txdone()
236          */
237         smc_channel_lock_acquire(scmi_info, xfer);
238
239         shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);
240
241         if (scmi_info->cap_id != ULONG_MAX)
242                 arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0,
243                                      0, 0, 0, 0, 0, &res);
244         else
245                 arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->param_page,
246                                      scmi_info->param_offset, 0, 0, 0, 0, 0,
247                                      &res);
248
249         /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
250         if (res.a0) {
251                 smc_channel_lock_release(scmi_info);
252                 return -EOPNOTSUPP;
253         }
254
255         return 0;
256 }
257
258 static void smc_fetch_response(struct scmi_chan_info *cinfo,
259                                struct scmi_xfer *xfer)
260 {
261         struct scmi_smc *scmi_info = cinfo->transport_info;
262
263         shmem_fetch_response(scmi_info->shmem, xfer);
264 }
265
266 static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
267                             struct scmi_xfer *__unused)
268 {
269         struct scmi_smc *scmi_info = cinfo->transport_info;
270
271         smc_channel_lock_release(scmi_info);
272 }
273
/* Transport operations exposed to the SCMI core */
static const struct scmi_transport_ops scmi_smc_ops = {
        .chan_available = smc_chan_available,
        .chan_setup = smc_chan_setup,
        .chan_free = smc_chan_free,
        .send_message = smc_send_message,
        .mark_txdone = smc_mark_txdone,
        .fetch_response = smc_fetch_response,
};
282
const struct scmi_desc scmi_smc_desc = {
        .ops = &scmi_smc_ops,
        .max_rx_timeout_ms = 30,
        .max_msg = 20,
        .max_msg_size = 128,
        /*
         * Setting .sync_cmds_atomic_replies to true for SMC assumes that,
         * once the SMC instruction has completed successfully, the issued
         * SCMI command would have been already fully processed by the SCMI
         * platform firmware and so any possible response value expected
         * for the issued command will be immediately ready to be fetched
         * from the shared memory area.
         */
        .sync_cmds_completed_on_ret = true,
        .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};