GNU Linux-libre 6.7.9-gnu: drivers/firmware/arm_scmi/clock.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Clock Protocol
4  *
5  * Copyright (C) 2018-2022 ARM Ltd.
6  */
7
8 #include <linux/module.h>
9 #include <linux/limits.h>
10 #include <linux/sort.h>
11
12 #include "protocols.h"
13 #include "notify.h"
14
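/*
 * Clock protocol command IDs as assigned by the SCMI specification; the
 * generic PROTOCOL_* commands (0x0 - 0x2) are shared across protocols and
 * come from protocols.h.
 */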
15 enum scmi_clock_protocol_cmd {
16         CLOCK_ATTRIBUTES = 0x3,
17         CLOCK_DESCRIBE_RATES = 0x4,
18         CLOCK_RATE_SET = 0x5,
19         CLOCK_RATE_GET = 0x6,
20         CLOCK_CONFIG_SET = 0x7,
21         CLOCK_NAME_GET = 0x8,
22         CLOCK_RATE_NOTIFY = 0x9,
23         CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
24         CLOCK_CONFIG_GET = 0xB,
25         CLOCK_POSSIBLE_PARENTS_GET = 0xC,
26         CLOCK_PARENT_SET = 0xD,
27         CLOCK_PARENT_GET = 0xE,
28 };
29
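/*
 * Clock states carried in bits[1:0] of the CLOCK_CONFIG_SET attributes.
 * CLK_STATE_UNCHANGED is meaningful only with the extended (v2) message
 * format, when a request updates just the OEM-specific configuration.
 */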
30 enum clk_state {
31         CLK_STATE_DISABLE,
32         CLK_STATE_ENABLE,
33         CLK_STATE_RESERVED,
34         CLK_STATE_UNCHANGED,
35 };
36
37 struct scmi_msg_resp_clock_protocol_attributes {
38         __le16 num_clocks;
39         u8 max_async_req;
40         u8 reserved;
41 };
42
43 struct scmi_msg_resp_clock_attributes {
44         __le32 attributes;
45 #define SUPPORTS_RATE_CHANGED_NOTIF(x)          ((x) & BIT(31))
46 #define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
47 #define SUPPORTS_EXTENDED_NAMES(x)              ((x) & BIT(29))
48 #define SUPPORTS_PARENT_CLOCK(x)                ((x) & BIT(28))
49         u8 name[SCMI_SHORT_NAME_MAX_SIZE];
50         __le32 clock_enable_latency;
51 };
52
53 struct scmi_msg_clock_possible_parents {
54         __le32 id;
55         __le32 skip_parents;
56 };
57
58 struct scmi_msg_resp_clock_possible_parents {
59         __le32 num_parent_flags;
60 #define NUM_PARENTS_RETURNED(x)         ((x) & 0xff)
61 #define NUM_PARENTS_REMAINING(x)        ((x) >> 24)
62         __le32 possible_parents[];
63 };
64
65 struct scmi_msg_clock_set_parent {
66         __le32 id;
67         __le32 parent_id;
68 };
69
70 struct scmi_msg_clock_config_set {
71         __le32 id;
72         __le32 attributes;
73 };
74
75 /* Extended CLOCK_CONFIG_SET message format, used for clock protocol v3.0 and onwards */
76 struct scmi_msg_clock_config_set_v2 {
77         __le32 id;
78         __le32 attributes;
79 #define NULL_OEM_TYPE                   0
80 #define REGMASK_OEM_TYPE_SET            GENMASK(23, 16)
81 #define REGMASK_CLK_STATE               GENMASK(1, 0)
82         __le32 oem_config_val;
83 };
84
85 struct scmi_msg_clock_config_get {
86         __le32 id;
87         __le32 flags;
88 #define REGMASK_OEM_TYPE_GET            GENMASK(7, 0)
89 };
90
91 struct scmi_msg_resp_clock_config_get {
92         __le32 attributes;
93         __le32 config;
94 #define IS_CLK_ENABLED(x)               le32_get_bits((x), BIT(0))
95         __le32 oem_config_val;
96 };
97
98 struct scmi_msg_clock_describe_rates {
99         __le32 id;
100         __le32 rate_index;
101 };
102
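/*
 * CLOCK_DESCRIBE_RATES response: discrete clocks list the supported rates
 * directly in rate[], while range clocks return a single {min, max, step}
 * triplet. Each 64-bit rate is split across two little-endian 32-bit words.
 */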
103 struct scmi_msg_resp_clock_describe_rates {
104         __le32 num_rates_flags;
105 #define NUM_RETURNED(x)         ((x) & 0xfff)
106 #define RATE_DISCRETE(x)        !((x) & BIT(12))
107 #define NUM_REMAINING(x)        ((x) >> 16)
108         struct {
109                 __le32 value_low;
110                 __le32 value_high;
111         } rate[];
112 #define RATE_TO_U64(X)          \
113 ({                              \
114         typeof(X) x = (X);      \
115         le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
116 })
117 };
118
119 struct scmi_clock_set_rate {
120         __le32 flags;
121 #define CLOCK_SET_ASYNC         BIT(0)
122 #define CLOCK_SET_IGNORE_RESP   BIT(1)
123 #define CLOCK_SET_ROUND_UP      BIT(2)
124 #define CLOCK_SET_ROUND_AUTO    BIT(3)
125         __le32 id;
126         __le32 value_low;
127         __le32 value_high;
128 };
129
130 struct scmi_msg_resp_set_rate_complete {
131         __le32 id;
132         __le32 rate_low;
133         __le32 rate_high;
134 };
135
136 struct scmi_msg_clock_rate_notify {
137         __le32 clk_id;
138         __le32 notify_enable;
139 };
140
141 struct scmi_clock_rate_notify_payld {
142         __le32 agent_id;
143         __le32 clock_id;
144         __le32 rate_low;
145         __le32 rate_high;
146 };
147
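/*
 * Per-instance clock protocol information kept as protocol private data.
 * clock_config_set/clock_config_get point at the version-specific
 * implementations selected during protocol initialization.
 */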
148 struct clock_info {
149         u32 version;
150         int num_clocks;
151         int max_async_req;
152         atomic_t cur_async_req;
153         struct scmi_clock_info *clk;
154         int (*clock_config_set)(const struct scmi_protocol_handle *ph,
155                                 u32 clk_id, enum clk_state state,
156                                 u8 oem_type, u32 oem_val, bool atomic);
157         int (*clock_config_get)(const struct scmi_protocol_handle *ph,
158                                 u32 clk_id, u8 oem_type, u32 *attributes,
159                                 bool *enabled, u32 *oem_val, bool atomic);
160 };
161
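/* Map notification event ids onto the corresponding *_NOTIFY commands */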
162 static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
163         CLOCK_RATE_NOTIFY,
164         CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
165 };
166
167 static int
168 scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
169                                    struct clock_info *ci)
170 {
171         int ret;
172         struct scmi_xfer *t;
173         struct scmi_msg_resp_clock_protocol_attributes *attr;
174
175         ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
176                                       0, sizeof(*attr), &t);
177         if (ret)
178                 return ret;
179
180         attr = t->rx.buf;
181
182         ret = ph->xops->do_xfer(ph, t);
183         if (!ret) {
184                 ci->num_clocks = le16_to_cpu(attr->num_clocks);
185                 ci->max_async_req = attr->max_async_req;
186         }
187
188         ph->xops->xfer_put(ph, t);
189         return ret;
190 }
191
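/* Private context handed to the iterator callbacks below */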
192 struct scmi_clk_ipriv {
193         struct device *dev;
194         u32 clk_id;
195         struct scmi_clock_info *clk;
196 };
197
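/*
 * Iterator callbacks used to retrieve the possible parents of a clock across
 * as many CLOCK_POSSIBLE_PARENTS_GET exchanges as needed; the parents array
 * is sized on the first reply using returned + remaining.
 */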
198 static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index,
199                                                       const void *priv)
200 {
201         struct scmi_msg_clock_possible_parents *msg = message;
202         const struct scmi_clk_ipriv *p = priv;
203
204         msg->id = cpu_to_le32(p->clk_id);
205         /* Set the number of parents to be skipped/already read */
206         msg->skip_parents = cpu_to_le32(desc_index);
207 }
208
209 static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st,
210                                                   const void *response, void *priv)
211 {
212         const struct scmi_msg_resp_clock_possible_parents *r = response;
213         struct scmi_clk_ipriv *p = priv;
214         struct device *dev = p->dev;
215         u32 flags;
216
217         flags = le32_to_cpu(r->num_parent_flags);
218         st->num_returned = NUM_PARENTS_RETURNED(flags);
219         st->num_remaining = NUM_PARENTS_REMAINING(flags);
220
221         /*
222          * The total number of parents is not advertised up front, so
223          * size the array as returned + remaining on the first reply.
224          */
225         if (!st->max_resources) {
226                 p->clk->num_parents = st->num_returned + st->num_remaining;
227                 p->clk->parents = devm_kcalloc(dev, p->clk->num_parents,
228                                                sizeof(*p->clk->parents),
229                                                GFP_KERNEL);
230                 if (!p->clk->parents) {
231                         p->clk->num_parents = 0;
232                         return -ENOMEM;
233                 }
234                 st->max_resources = st->num_returned + st->num_remaining;
235         }
236
237         return 0;
238 }
239
240 static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph,
241                                                       const void *response,
242                                                       struct scmi_iterator_state *st,
243                                                       void *priv)
244 {
245         const struct scmi_msg_resp_clock_possible_parents *r = response;
246         struct scmi_clk_ipriv *p = priv;
247
248         u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx];
249
250         *parent = le32_to_cpu(r->possible_parents[st->loop_idx]);
251
252         return 0;
253 }
254
255 static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id,
256                                        struct scmi_clock_info *clk)
257 {
258         struct scmi_iterator_ops ops = {
259                 .prepare_message = iter_clk_possible_parents_prepare_message,
260                 .update_state = iter_clk_possible_parents_update_state,
261                 .process_response = iter_clk_possible_parents_process_response,
262         };
263
264         struct scmi_clk_ipriv ppriv = {
265                 .clk_id = clk_id,
266                 .clk = clk,
267                 .dev = ph->dev,
268         };
269         void *iter;
270         int ret;
271
272         iter = ph->hops->iter_response_init(ph, &ops, 0,
273                                             CLOCK_POSSIBLE_PARENTS_GET,
274                                             sizeof(struct scmi_msg_clock_possible_parents),
275                                             &ppriv);
276         if (IS_ERR(iter))
277                 return PTR_ERR(iter);
278
279         ret = ph->hops->iter_response_run(iter);
280
281         return ret;
282 }
283
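/*
 * Retrieve the attributes of a single clock: its name (extended when
 * supported), enable latency, notification capabilities and, if advertised,
 * the list of possible parent clocks.
 */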
284 static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
285                                      u32 clk_id, struct scmi_clock_info *clk,
286                                      u32 version)
287 {
288         int ret;
289         u32 attributes;
290         struct scmi_xfer *t;
291         struct scmi_msg_resp_clock_attributes *attr;
292
293         ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
294                                       sizeof(clk_id), sizeof(*attr), &t);
295         if (ret)
296                 return ret;
297
298         put_unaligned_le32(clk_id, t->tx.buf);
299         attr = t->rx.buf;
300
301         ret = ph->xops->do_xfer(ph, t);
302         if (!ret) {
303                 u32 latency = 0;
304                 attributes = le32_to_cpu(attr->attributes);
305                 strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
306                 /* clock_enable_latency is present only since SCMI v3.1 (clock protocol major >= 2) */
307                 if (PROTOCOL_REV_MAJOR(version) >= 0x2)
308                         latency = le32_to_cpu(attr->clock_enable_latency);
309                 clk->enable_latency = latency ? : U32_MAX;
310         }
311
312         ph->xops->xfer_put(ph, t);
313
314         /*
315          * If supported overwrite short name with the extended one;
316          * on error just carry on and use already provided short name.
317          */
318         if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
319                 if (SUPPORTS_EXTENDED_NAMES(attributes))
320                         ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
321                                                     clk->name,
322                                                     SCMI_MAX_STR_SIZE);
323
324                 if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
325                         clk->rate_changed_notifications = true;
326                 if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
327                         clk->rate_change_requested_notifications = true;
328                 if (SUPPORTS_PARENT_CLOCK(attributes))
329                         scmi_clock_possible_parents(ph, clk_id, clk);
330         }
331
332         return ret;
333 }
334
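/* sort() comparator used to order discrete rates ascending */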
335 static int rate_cmp_func(const void *_r1, const void *_r2)
336 {
337         const u64 *r1 = _r1, *r2 = _r2;
338
339         if (*r1 < *r2)
340                 return -1;
341         else if (*r1 == *r2)
342                 return 0;
343         else
344                 return 1;
345 }
346
347 static void iter_clk_describe_prepare_message(void *message,
348                                               const unsigned int desc_index,
349                                               const void *priv)
350 {
351         struct scmi_msg_clock_describe_rates *msg = message;
352         const struct scmi_clk_ipriv *p = priv;
353
354         msg->id = cpu_to_le32(p->clk_id);
355         /* Set the number of rates to be skipped/already read */
356         msg->rate_index = cpu_to_le32(desc_index);
357 }
358
359 static int
360 iter_clk_describe_update_state(struct scmi_iterator_state *st,
361                                const void *response, void *priv)
362 {
363         u32 flags;
364         struct scmi_clk_ipriv *p = priv;
365         const struct scmi_msg_resp_clock_describe_rates *r = response;
366
367         flags = le32_to_cpu(r->num_rates_flags);
368         st->num_remaining = NUM_REMAINING(flags);
369         st->num_returned = NUM_RETURNED(flags);
370         p->clk->rate_discrete = RATE_DISCRETE(flags);
371
372         /* Warn about out-of-spec replies ... */
373         if (!p->clk->rate_discrete &&
374             (st->num_returned != 3 || st->num_remaining != 0)) {
375                 dev_warn(p->dev,
376                          "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
377                          p->clk->name, st->num_returned, st->num_remaining,
378                          st->rx_len);
379
380                 /*
381                  * A known quirk: a rate triplet is returned but num_returned
382                  * is not 3. Check for a safe payload size and fix it up.
383                  */
384                 if (st->num_returned != 3 && st->num_remaining == 0 &&
385                     st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
386                         st->num_returned = 3;
387                         st->num_remaining = 0;
388                 } else {
389                         dev_err(p->dev,
390                                 "Cannot fix out-of-spec reply!\n");
391                         return -EPROTO;
392                 }
393         }
394
395         return 0;
396 }
397
398 static int
399 iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
400                                    const void *response,
401                                    struct scmi_iterator_state *st, void *priv)
402 {
403         int ret = 0;
404         struct scmi_clk_ipriv *p = priv;
405         const struct scmi_msg_resp_clock_describe_rates *r = response;
406
407         if (!p->clk->rate_discrete) {
408                 switch (st->desc_index + st->loop_idx) {
409                 case 0:
410                         p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
411                         break;
412                 case 1:
413                         p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
414                         break;
415                 case 2:
416                         p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
417                         break;
418                 default:
419                         ret = -EINVAL;
420                         break;
421                 }
422         } else {
423                 u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];
424
425                 *rate = RATE_TO_U64(r->rate[st->loop_idx]);
426                 p->clk->list.num_rates++;
427         }
428
429         return ret;
430 }
431
432 static int
433 scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
434                               struct scmi_clock_info *clk)
435 {
436         int ret;
437         void *iter;
438         struct scmi_iterator_ops ops = {
439                 .prepare_message = iter_clk_describe_prepare_message,
440                 .update_state = iter_clk_describe_update_state,
441                 .process_response = iter_clk_describe_process_response,
442         };
443         struct scmi_clk_ipriv cpriv = {
444                 .clk_id = clk_id,
445                 .clk = clk,
446                 .dev = ph->dev,
447         };
448
449         iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
450                                             CLOCK_DESCRIBE_RATES,
451                                             sizeof(struct scmi_msg_clock_describe_rates),
452                                             &cpriv);
453         if (IS_ERR(iter))
454                 return PTR_ERR(iter);
455
456         ret = ph->hops->iter_response_run(iter);
457         if (ret)
458                 return ret;
459
460         if (!clk->rate_discrete) {
461                 dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
462                         clk->range.min_rate, clk->range.max_rate,
463                         clk->range.step_size);
464         } else if (clk->list.num_rates) {
465                 sort(clk->list.rates, clk->list.num_rates,
466                      sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
467         }
468
469         return ret;
470 }
471
472 static int
473 scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
474                     u32 clk_id, u64 *value)
475 {
476         int ret;
477         struct scmi_xfer *t;
478
479         ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
480                                       sizeof(__le32), sizeof(u64), &t);
481         if (ret)
482                 return ret;
483
484         put_unaligned_le32(clk_id, t->tx.buf);
485
486         ret = ph->xops->do_xfer(ph, t);
487         if (!ret)
488                 *value = get_unaligned_le64(t->rx.buf);
489
490         ph->xops->xfer_put(ph, t);
491         return ret;
492 }
493
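/*
 * Set a new clock rate. When the platform supports asynchronous requests and
 * the number of in-flight requests stays below max_async_req, the command is
 * sent with CLOCK_SET_ASYNC and completion arrives as a delayed response.
 */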
494 static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
495                                u32 clk_id, u64 rate)
496 {
497         int ret;
498         u32 flags = 0;
499         struct scmi_xfer *t;
500         struct scmi_clock_set_rate *cfg;
501         struct clock_info *ci = ph->get_priv(ph);
502
503         ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
504         if (ret)
505                 return ret;
506
507         if (ci->max_async_req &&
508             atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
509                 flags |= CLOCK_SET_ASYNC;
510
511         cfg = t->tx.buf;
512         cfg->flags = cpu_to_le32(flags);
513         cfg->id = cpu_to_le32(clk_id);
514         cfg->value_low = cpu_to_le32(rate & 0xffffffff);
515         cfg->value_high = cpu_to_le32(rate >> 32);
516
517         if (flags & CLOCK_SET_ASYNC) {
518                 ret = ph->xops->do_xfer_with_response(ph, t);
519                 if (!ret) {
520                         struct scmi_msg_resp_set_rate_complete *resp;
521
522                         resp = t->rx.buf;
523                         if (le32_to_cpu(resp->id) == clk_id)
524                                 dev_dbg(ph->dev,
525                                         "Clk ID %d set async to %llu\n", clk_id,
526                                         get_unaligned_le64(&resp->rate_low));
527                         else
528                                 ret = -EPROTO;
529                 }
530         } else {
531                 ret = ph->xops->do_xfer(ph, t);
532         }
533
534         if (ci->max_async_req)
535                 atomic_dec(&ci->cur_async_req);
536
537         ph->xops->xfer_put(ph, t);
538         return ret;
539 }
540
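/*
 * Legacy CLOCK_CONFIG_SET format: only the enable/disable state can be
 * conveyed, so the OEM-specific arguments are accepted but ignored here.
 */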
541 static int
542 scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
543                       enum clk_state state, u8 __unused0, u32 __unused1,
544                       bool atomic)
545 {
546         int ret;
547         struct scmi_xfer *t;
548         struct scmi_msg_clock_config_set *cfg;
549
550         if (state >= CLK_STATE_RESERVED)
551                 return -EINVAL;
552
553         ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
554                                       sizeof(*cfg), 0, &t);
555         if (ret)
556                 return ret;
557
558         t->hdr.poll_completion = atomic;
559
560         cfg = t->tx.buf;
561         cfg->id = cpu_to_le32(clk_id);
562         cfg->attributes = cpu_to_le32(state);
563
564         ret = ph->xops->do_xfer(ph, t);
565
566         ph->xops->xfer_put(ph, t);
567         return ret;
568 }
569
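/*
 * Reparent a clock: parent_id is an index into the parents[] list previously
 * discovered through CLOCK_POSSIBLE_PARENTS_GET.
 */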
570 static int
571 scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
572                       u32 parent_id)
573 {
574         int ret;
575         struct scmi_xfer *t;
576         struct scmi_msg_clock_set_parent *cfg;
577         struct clock_info *ci = ph->get_priv(ph);
578         struct scmi_clock_info *clk;
579
580         if (clk_id >= ci->num_clocks)
581                 return -EINVAL;
582
583         clk = ci->clk + clk_id;
584
585         if (parent_id >= clk->num_parents)
586                 return -EINVAL;
587
588         ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
589                                       sizeof(*cfg), 0, &t);
590         if (ret)
591                 return ret;
592
593         t->hdr.poll_completion = false;
594
595         cfg = t->tx.buf;
596         cfg->id = cpu_to_le32(clk_id);
597         cfg->parent_id = cpu_to_le32(clk->parents[parent_id]);
598
599         ret = ph->xops->do_xfer(ph, t);
600
601         ph->xops->xfer_put(ph, t);
602
603         return ret;
604 }
605
606 static int
607 scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
608                       u32 *parent_id)
609 {
610         int ret;
611         struct scmi_xfer *t;
612
613         ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET,
614                                       sizeof(__le32), sizeof(u32), &t);
615         if (ret)
616                 return ret;
617
618         put_unaligned_le32(clk_id, t->tx.buf);
619
620         ret = ph->xops->do_xfer(ph, t);
621         if (!ret)
622                 *parent_id = get_unaligned_le32(t->rx.buf);
623
624         ph->xops->xfer_put(ph, t);
625         return ret;
626 }
627
628 /* Extended CLOCK_CONFIG_SET, used only for clock protocol v3.0 and onwards */
629 static int
630 scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
631                          enum clk_state state, u8 oem_type, u32 oem_val,
632                          bool atomic)
633 {
634         int ret;
635         u32 attrs;
636         struct scmi_xfer *t;
637         struct scmi_msg_clock_config_set_v2 *cfg;
638
639         if (state == CLK_STATE_RESERVED ||
640             (!oem_type && state == CLK_STATE_UNCHANGED))
641                 return -EINVAL;
642
643         ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
644                                       sizeof(*cfg), 0, &t);
645         if (ret)
646                 return ret;
647
648         t->hdr.poll_completion = atomic;
649
650         attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) |
651                  FIELD_PREP(REGMASK_CLK_STATE, state);
652
653         cfg = t->tx.buf;
654         cfg->id = cpu_to_le32(clk_id);
655         cfg->attributes = cpu_to_le32(attrs);
656         /* Clear in any case */
657         cfg->oem_config_val = cpu_to_le32(0);
658         if (oem_type)
659                 cfg->oem_config_val = cpu_to_le32(oem_val);
660
661         ret = ph->xops->do_xfer(ph, t);
662
663         ph->xops->xfer_put(ph, t);
664         return ret;
665 }
666
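/*
 * Enable/disable helpers dispatch to the version-specific clock_config_set
 * implementation chosen at protocol init.
 */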
667 static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
668                              bool atomic)
669 {
670         struct clock_info *ci = ph->get_priv(ph);
671
672         return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
673                                     NULL_OEM_TYPE, 0, atomic);
674 }
675
676 static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
677                               bool atomic)
678 {
679         struct clock_info *ci = ph->get_priv(ph);
680
681         return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
682                                     NULL_OEM_TYPE, 0, atomic);
683 }
684
685 /* CLOCK_CONFIG_GET, used only for clock protocol v3.0 and onwards */
686 static int
687 scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
688                          u8 oem_type, u32 *attributes, bool *enabled,
689                          u32 *oem_val, bool atomic)
690 {
691         int ret;
692         u32 flags;
693         struct scmi_xfer *t;
694         struct scmi_msg_clock_config_get *cfg;
695
696         ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET,
697                                       sizeof(*cfg), 0, &t);
698         if (ret)
699                 return ret;
700
701         t->hdr.poll_completion = atomic;
702
703         flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type);
704
705         cfg = t->tx.buf;
706         cfg->id = cpu_to_le32(clk_id);
707         cfg->flags = cpu_to_le32(flags);
708
709         ret = ph->xops->do_xfer(ph, t);
710         if (!ret) {
711                 struct scmi_msg_resp_clock_config_get *resp = t->rx.buf;
712
713                 if (attributes)
714                         *attributes = le32_to_cpu(resp->attributes);
715
716                 if (enabled)
717                         *enabled = IS_CLK_ENABLED(resp->config);
718
719                 if (oem_val && oem_type)
720                         *oem_val = le32_to_cpu(resp->oem_config_val);
721         }
722
723         ph->xops->xfer_put(ph, t);
724
725         return ret;
726 }
727
728 static int
729 scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
730                       u8 oem_type, u32 *attributes, bool *enabled,
731                       u32 *oem_val, bool atomic)
732 {
733         int ret;
734         struct scmi_xfer *t;
735         struct scmi_msg_resp_clock_attributes *resp;
736
737         if (!enabled)
738                 return -EINVAL;
739
740         ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
741                                       sizeof(clk_id), sizeof(*resp), &t);
742         if (ret)
743                 return ret;
744
745         t->hdr.poll_completion = atomic;
746         put_unaligned_le32(clk_id, t->tx.buf);
747         resp = t->rx.buf;
748
749         ret = ph->xops->do_xfer(ph, t);
750         if (!ret)
751                 *enabled = IS_CLK_ENABLED(resp->attributes);
752
753         ph->xops->xfer_put(ph, t);
754
755         return ret;
756 }
757
758 static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
759                                 u32 clk_id, bool *enabled, bool atomic)
760 {
761         struct clock_info *ci = ph->get_priv(ph);
762
763         return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL,
764                                     enabled, NULL, atomic);
765 }
766
767 static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
768                                      u32 clk_id, u8 oem_type, u32 oem_val,
769                                      bool atomic)
770 {
771         struct clock_info *ci = ph->get_priv(ph);
772
773         return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
774                                     oem_type, oem_val, atomic);
775 }
776
777 static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
778                                      u32 clk_id, u8 oem_type, u32 *oem_val,
779                                      u32 *attributes, bool atomic)
780 {
781         struct clock_info *ci = ph->get_priv(ph);
782
783         return ci->clock_config_get(ph, clk_id, oem_type, attributes,
784                                     NULL, oem_val, atomic);
785 }
786
787 static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
788 {
789         struct clock_info *ci = ph->get_priv(ph);
790
791         return ci->num_clocks;
792 }
793
794 static const struct scmi_clock_info *
795 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
796 {
797         struct scmi_clock_info *clk;
798         struct clock_info *ci = ph->get_priv(ph);
799
800         if (clk_id >= ci->num_clocks)
801                 return NULL;
802
803         clk = ci->clk + clk_id;
804         if (!clk->name[0])
805                 return NULL;
806
807         return clk;
808 }
809
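/*
 * Operations exposed to SCMI users (for instance the clk-scmi clock
 * provider) through the clock protocol handle.
 */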
810 static const struct scmi_clk_proto_ops clk_proto_ops = {
811         .count_get = scmi_clock_count_get,
812         .info_get = scmi_clock_info_get,
813         .rate_get = scmi_clock_rate_get,
814         .rate_set = scmi_clock_rate_set,
815         .enable = scmi_clock_enable,
816         .disable = scmi_clock_disable,
817         .state_get = scmi_clock_state_get,
818         .config_oem_get = scmi_clock_config_oem_get,
819         .config_oem_set = scmi_clock_config_oem_set,
820         .parent_set = scmi_clock_set_parent,
821         .parent_get = scmi_clock_get_parent,
822 };
823
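/* Enable or disable rate-change(-requested) notifications for one clock */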
824 static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
825                                 u32 clk_id, int message_id, bool enable)
826 {
827         int ret;
828         struct scmi_xfer *t;
829         struct scmi_msg_clock_rate_notify *notify;
830
831         ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
832         if (ret)
833                 return ret;
834
835         notify = t->tx.buf;
836         notify->clk_id = cpu_to_le32(clk_id);
837         notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
838
839         ret = ph->xops->do_xfer(ph, t);
840
841         ph->xops->xfer_put(ph, t);
842         return ret;
843 }
844
845 static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
846                                        u8 evt_id, u32 src_id, bool enable)
847 {
848         int ret, cmd_id;
849
850         if (evt_id >= ARRAY_SIZE(evt_2_cmd))
851                 return -EINVAL;
852
853         cmd_id = evt_2_cmd[evt_id];
854         ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
855         if (ret)
856                 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
857                          evt_id, src_id, ret);
858
859         return ret;
860 }
861
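/*
 * Convert a received rate notification payload into a report for the SCMI
 * notification core, using the clock id as the event source.
 */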
862 static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
863                                          u8 evt_id, ktime_t timestamp,
864                                          const void *payld, size_t payld_sz,
865                                          void *report, u32 *src_id)
866 {
867         const struct scmi_clock_rate_notify_payld *p = payld;
868         struct scmi_clock_rate_notif_report *r = report;
869
870         if (sizeof(*p) != payld_sz ||
871             (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
872              evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
873                 return NULL;
874
875         r->timestamp = timestamp;
876         r->agent_id = le32_to_cpu(p->agent_id);
877         r->clock_id = le32_to_cpu(p->clock_id);
878         r->rate = get_unaligned_le64(&p->rate_low);
879         *src_id = r->clock_id;
880
881         return r;
882 }
883
884 static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
885 {
886         struct clock_info *ci = ph->get_priv(ph);
887
888         if (!ci)
889                 return -EINVAL;
890
891         return ci->num_clocks;
892 }
893
894 static const struct scmi_event clk_events[] = {
895         {
896                 .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
897                 .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
898                 .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
899         },
900         {
901                 .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
902                 .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
903                 .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
904         },
905 };
906
907 static const struct scmi_event_ops clk_event_ops = {
908         .get_num_sources = scmi_clk_get_num_sources,
909         .set_notify_enabled = scmi_clk_set_notify_enabled,
910         .fill_custom_report = scmi_clk_fill_custom_report,
911 };
912
913 static const struct scmi_protocol_events clk_protocol_events = {
914         .queue_sz = SCMI_PROTO_QUEUE_SZ,
915         .ops = &clk_event_ops,
916         .evts = clk_events,
917         .num_events = ARRAY_SIZE(clk_events),
918 };
919
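/*
 * Protocol initialization: query version and attributes, enumerate each
 * clock (attributes plus supported rates) and select the CONFIG_SET/GET
 * handlers matching the reported protocol version.
 */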
920 static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
921 {
922         u32 version;
923         int clkid, ret;
924         struct clock_info *cinfo;
925
926         ret = ph->xops->version_get(ph, &version);
927         if (ret)
928                 return ret;
929
930         dev_dbg(ph->dev, "Clock Version %d.%d\n",
931                 PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
932
933         cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
934         if (!cinfo)
935                 return -ENOMEM;
936
937         ret = scmi_clock_protocol_attributes_get(ph, cinfo);
938         if (ret)
939                 return ret;
940
941         cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
942                                   sizeof(*cinfo->clk), GFP_KERNEL);
943         if (!cinfo->clk)
944                 return -ENOMEM;
945
946         for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
947                 struct scmi_clock_info *clk = cinfo->clk + clkid;
948
949                 ret = scmi_clock_attributes_get(ph, clkid, clk, version);
950                 if (!ret)
951                         scmi_clock_describe_rates_get(ph, clkid, clk);
952         }
953
954         if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
955                 cinfo->clock_config_set = scmi_clock_config_set_v2;
956                 cinfo->clock_config_get = scmi_clock_config_get_v2;
957         } else {
958                 cinfo->clock_config_set = scmi_clock_config_set;
959                 cinfo->clock_config_get = scmi_clock_config_get;
960         }
961
962         cinfo->version = version;
963         return ph->set_priv(ph, cinfo);
964 }
965
966 static const struct scmi_protocol scmi_clock = {
967         .id = SCMI_PROTOCOL_CLOCK,
968         .owner = THIS_MODULE,
969         .instance_init = &scmi_clock_protocol_init,
970         .ops = &clk_proto_ops,
971         .events = &clk_protocol_events,
972 };
973
974 DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)