drivers/soc/qcom/rpmh.c (GNU Linux-libre 5.4.200-gnu1)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS                 msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)        \
        struct rpmh_request name = {                    \
                .msg = {                                \
                        .state = s,                     \
                        .cmds = name.cmd,               \
                        .num_cmds = 0,                  \
                        .wait_for_compl = true,         \
                },                                      \
                .cmd = { { 0 } },                       \
                .completion = q,                        \
                .dev = dev,                             \
                .needs_free = false,                    \
        }
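
/*
 * DEFINE_RPMH_MSG_ONSTACK() builds a fully initialized struct rpmh_request
 * on the caller's stack. See rpmh_write() and send_single() below for its
 * in-file users; the caller then fills in rpm_msg.cmd[] and
 * rpm_msg.msg.num_cmds before handing rpm_msg.msg to the controller.
 */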

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
        u32 addr;
        u32 sleep_val;
        u32 wake_val;
        struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
        struct list_head list;
        int count;
        struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
        struct rsc_drv *drv = dev_get_drvdata(dev->parent);

        return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
        struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
                                                    msg);
        struct completion *compl = rpm_msg->completion;
        bool free = rpm_msg->needs_free;

        rpm_msg->err = r;

        if (r)
                dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
                        rpm_msg->msg.cmds[0].addr, r);

        if (!compl)
                goto exit;

        /* Signal the blocking thread we are done */
        complete(compl);

exit:
        if (free)
                kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
        struct cache_req *p, *req = NULL;

        list_for_each_entry(p, &ctrlr->cache, list) {
                if (p->addr == addr) {
                        req = p;
                        break;
                }
        }

        return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
                                           enum rpmh_state state,
                                           struct tcs_cmd *cmd)
{
        struct cache_req *req;
        unsigned long flags;
        u32 old_sleep_val, old_wake_val;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        req = __find_req(ctrlr, cmd->addr);
        if (req)
                goto existing;

        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                req = ERR_PTR(-ENOMEM);
                goto unlock;
        }

        req->addr = cmd->addr;
        req->sleep_val = req->wake_val = UINT_MAX;
        list_add_tail(&req->list, &ctrlr->cache);

existing:
        old_sleep_val = req->sleep_val;
        old_wake_val = req->wake_val;

        switch (state) {
        case RPMH_ACTIVE_ONLY_STATE:
        case RPMH_WAKE_ONLY_STATE:
                req->wake_val = cmd->data;
                break;
        case RPMH_SLEEP_STATE:
                req->sleep_val = cmd->data;
                break;
        }

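        /*
         * Mark the controller dirty only if this request actually changed a
         * cached vote and both the sleep and wake votes for this address
         * have been set at least once (i.e. are no longer UINT_MAX), so that
         * rpmh_flush() knows there is new data to program.
         */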
        ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
                         req->wake_val != old_wake_val) &&
                         req->sleep_val != UINT_MAX &&
                         req->wake_val != UINT_MAX;

unlock:
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

        return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
                        struct rpmh_request *rpm_msg)
{
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        int ret = -EINVAL;
        struct cache_req *req;
        int i;

        rpm_msg->msg.state = state;

        /* Cache the request in our store and link the payload */
        for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
                req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
                if (IS_ERR(req))
                        return PTR_ERR(req);
        }

        rpm_msg->msg.state = state;

        if (state == RPMH_ACTIVE_ONLY_STATE) {
                WARN_ON(irqs_disabled());
                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
        } else {
                /* Clean up our call by spoofing tx_done */
                ret = 0;
                rpmh_tx_done(&rpm_msg->msg, ret);
        }

        return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
                const struct tcs_cmd *cmd, u32 n)
{
        if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
                return -EINVAL;

        memcpy(req->cmd, cmd, n * sizeof(*cmd));

        req->msg.state = state;
        req->msg.cmds = req->cmd;
        req->msg.num_cmds = n;

        return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in the payload
 *
 * Write a set of RPMH commands; the order of the commands is maintained
 * and they will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
                     const struct tcs_cmd *cmd, u32 n)
{
        struct rpmh_request *rpm_msg;
        int ret;

        rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
        if (!rpm_msg)
                return -ENOMEM;
        rpm_msg->needs_free = true;

        ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
        if (ret) {
                kfree(rpm_msg);
                return ret;
        }

        return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);

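/*
 * Illustrative sketch (not part of the original driver): a minimal example
 * of how a client driver might fire off an asynchronous vote with
 * rpmh_write_async(). The resource address and data below are hypothetical
 * placeholders; real clients typically obtain addresses via cmd-db lookups.
 */
static int __maybe_unused example_async_vote(const struct device *dev)
{
        struct tcs_cmd cmd = {
                .addr = 0x30000,        /* hypothetical resource address */
                .data = 0x1,            /* hypothetical vote value */
        };

        /* Returns once the request is handed off; no ack is awaited */
        return rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
}
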
/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
               const struct tcs_cmd *cmd, u32 n)
{
        DECLARE_COMPLETION_ONSTACK(compl);
        DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
        int ret;

        if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
                return -EINVAL;

        memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
        rpm_msg.msg.num_cmds = n;

        ret = __rpmh_write(dev, state, &rpm_msg);
        if (ret)
                return ret;

        ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
        WARN_ON(!ret);
        return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);

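/*
 * Illustrative sketch (not part of the original driver): a blocking vote
 * using rpmh_write(). The address/data pair is a hypothetical placeholder.
 * rpmh_write() sleeps until the request is acked or RPMH_TIMEOUT_MS
 * expires, so it must not be called from atomic context.
 */
static int __maybe_unused example_blocking_vote(const struct device *dev)
{
        struct tcs_cmd cmd = {
                .addr = 0x30010,        /* hypothetical resource address */
                .data = 0x3,            /* hypothetical performance level */
        };

        return rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
}
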
static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
        unsigned long flags;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_add_tail(&req->list, &ctrlr->batch_cache);
        ctrlr->dirty = true;
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
        struct batch_cache_req *req;
        const struct rpmh_request *rpm_msg;
        unsigned long flags;
        int ret = 0;
        int i;

        /* Send Sleep/Wake requests to the controller, expect no response */
        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_for_each_entry(req, &ctrlr->batch_cache, list) {
                for (i = 0; i < req->count; i++) {
                        rpm_msg = req->rpm_msgs + i;
                        ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
                                                       &rpm_msg->msg);
                        if (ret)
                                break;
                }
        }
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

        return ret;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a batch of RPMH commands. If the request state is ACTIVE_ONLY, the
 * requests are treated as completion requests and sent to the controller
 * immediately; the function waits until all the commands are complete. If
 * the request state is SLEEP or WAKE_ONLY, the batch is cached and
 * programmed into the controller on the next flush as fire-and-forget, and
 * no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
                     const struct tcs_cmd *cmd, u32 *n)
{
        struct batch_cache_req *req;
        struct rpmh_request *rpm_msgs;
        struct completion *compls;
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        unsigned long time_left;
        int count = 0;
        int ret, i;
        void *ptr;

        if (!cmd || !n)
                return -EINVAL;

        while (n[count] > 0)
                count++;
        if (!count)
                return -EINVAL;

        ptr = kzalloc(sizeof(*req) +
                      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
                      GFP_ATOMIC);
        if (!ptr)
                return -ENOMEM;

        req = ptr;
        compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

        req->count = count;
        rpm_msgs = req->rpm_msgs;

        for (i = 0; i < count; i++) {
                __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
                cmd += n[i];
        }

        if (state != RPMH_ACTIVE_ONLY_STATE) {
                cache_batch(ctrlr, req);
                return 0;
        }

        for (i = 0; i < count; i++) {
                struct completion *compl = &compls[i];

                init_completion(compl);
                rpm_msgs[i].completion = compl;
                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
                if (ret) {
                        pr_err("Error(%d) sending RPMH message addr=%#x\n",
                               ret, rpm_msgs[i].msg.cmds[0].addr);
                        break;
                }
        }

        time_left = RPMH_TIMEOUT_MS;
        while (i--) {
                time_left = wait_for_completion_timeout(&compls[i], time_left);
                if (!time_left) {
                        /*
                         * Better hope they never finish because they'll signal
                         * the completion that we're going to free once
                         * we've returned from this function.
                         */
                        WARN_ON(1);
                        ret = -ETIMEDOUT;
                        goto exit;
                }
        }

exit:
        kfree(ptr);

        return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);

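/*
 * Illustrative sketch (not part of the original driver): a batched set of
 * votes sent with rpmh_write_batch(). The two commands and the batch split
 * below are hypothetical; @n is a zero-terminated array giving the number
 * of commands in each batch.
 */
static int __maybe_unused example_batch_vote(const struct device *dev)
{
        struct tcs_cmd cmds[2] = {
                { .addr = 0x30000, .data = 0x1 },       /* hypothetical */
                { .addr = 0x30010, .data = 0x2 },       /* hypothetical */
        };
        u32 num_cmds[] = { 2, 0 };      /* one batch of two, zero terminated */

        /* ACTIVE_ONLY batches are sent immediately and waited on */
        return rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, num_cmds);
}
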
static int is_req_valid(struct cache_req *req)
{
        return (req->sleep_val != UINT_MAX &&
                req->wake_val != UINT_MAX &&
                req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
                       u32 addr, u32 data)
{
        DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);

        /* Wake sets are always complete and sleep sets are not */
        rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
        rpm_msg.cmd[0].addr = addr;
        rpm_msg.cmd[0].data = data;
        rpm_msg.msg.num_cmds = 1;

        return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered sleep and wake sets to TCS
 *
 * @dev: The device making the request
 *
 * Return: -EBUSY if the controller is busy, probably waiting on a response
 * to an RPMH request sent earlier.
 *
 * This function is always called from the sleep code from the last CPU
 * that is powering down the entire system. Since no other RPMH API would be
 * executing at this time, it is safe to run lockless.
 */
int rpmh_flush(const struct device *dev)
{
        struct cache_req *p;
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        int ret;

        if (!ctrlr->dirty) {
                pr_debug("Skipping flush, TCS has latest data.\n");
                return 0;
        }

        /* Invalidate the TCSes first to avoid stale data */
        do {
                ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
        } while (ret == -EAGAIN);
        if (ret)
                return ret;

        /* First flush the cached batch requests */
        ret = flush_batch(ctrlr);
        if (ret)
                return ret;

        /*
         * Nobody else should be calling this function other than system PM,
         * hence we can run without locks.
         */
        list_for_each_entry(p, &ctrlr->cache, list) {
                if (!is_req_valid(p)) {
                        pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
                                 __func__, p->addr, p->sleep_val, p->wake_val);
                        continue;
                }
                ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
                if (ret)
                        return ret;
                ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
                                  p->addr, p->wake_val);
                if (ret)
                        return ret;
        }

        ctrlr->dirty = false;

        return 0;
}
EXPORT_SYMBOL(rpmh_flush);

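/*
 * Illustrative sketch (not part of the original driver): per the kernel-doc
 * above, the platform sleep code is expected to call rpmh_flush() from the
 * last CPU going down, after all clients have made their sleep/wake votes.
 * The callback below is purely hypothetical.
 */
static int __maybe_unused example_system_sleep_enter(const struct device *dev)
{
        /* Program all cached sleep and wake votes into the TCSes */
        return rpmh_flush(dev);
}
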
/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
int rpmh_invalidate(const struct device *dev)
{
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        struct batch_cache_req *req, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
                kfree(req);
        INIT_LIST_HEAD(&ctrlr->batch_cache);
        ctrlr->dirty = true;
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

        return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
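
/*
 * Illustrative sketch (not part of the original driver): a client that
 * rebuilds its sleep/wake batches from scratch would typically drop the
 * stale batches first and then write the new ones. The helper below is
 * hypothetical.
 */
static int __maybe_unused example_refresh_batches(const struct device *dev,
                                                  const struct tcs_cmd *cmd,
                                                  u32 *n)
{
        int ret;

        /* Drop previously cached sleep/wake batches */
        ret = rpmh_invalidate(dev);
        if (ret)
                return ret;

        /* Cache the new sleep batch; it is flushed on rpmh_flush() */
        return rpmh_write_batch(dev, RPMH_SLEEP_STATE, cmd, n);
}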