soc/qcom/rpmh-rsc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#define RSC_DRV_TCS_OFFSET              672
#define RSC_DRV_CMD_OFFSET              20

/* DRV Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG            0x0C
#define DRV_NUM_TCS_MASK                0x3F
#define DRV_NUM_TCS_SHIFT               6
#define DRV_NCPT_MASK                   0x1F
#define DRV_NCPT_SHIFT                  27

/* Register offsets */
#define RSC_DRV_IRQ_ENABLE              0x00
#define RSC_DRV_IRQ_STATUS              0x04
#define RSC_DRV_IRQ_CLEAR               0x08
#define RSC_DRV_CMD_WAIT_FOR_CMPL       0x10
#define RSC_DRV_CONTROL                 0x14
#define RSC_DRV_STATUS                  0x18
#define RSC_DRV_CMD_ENABLE              0x1C
#define RSC_DRV_CMD_MSGID               0x30
#define RSC_DRV_CMD_ADDR                0x34
#define RSC_DRV_CMD_DATA                0x38
#define RSC_DRV_CMD_STATUS              0x3C
#define RSC_DRV_CMD_RESP_DATA           0x40

#define TCS_AMC_MODE_ENABLE             BIT(16)
#define TCS_AMC_MODE_TRIGGER            BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN                   8
#define CMD_MSGID_RESP_REQ              BIT(8)
#define CMD_MSGID_WRITE                 BIT(16)
#define CMD_STATUS_ISSUED               BIT(8)
#define CMD_STATUS_COMPL                BIT(16)

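/*
 * TCS register layout: each TCS occupies RSC_DRV_TCS_OFFSET (672) bytes past
 * drv->tcs_base, and each command slot within a TCS occupies
 * RSC_DRV_CMD_OFFSET (20) bytes. The accessors below fold that math in;
 * cmd_id only matters for the per-command registers
 * (RSC_DRV_CMD_MSGID/ADDR/DATA/STATUS). write_tcs_reg_sync() additionally
 * spins (udelay(1) per try) until the written value reads back.
 */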
static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
        return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
                             RSC_DRV_CMD_OFFSET * cmd_id);
}

static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
                          u32 data)
{
        writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
                       RSC_DRV_CMD_OFFSET * cmd_id);
}

static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
{
        writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
}

static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
                               u32 data)
{
        writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
        for (;;) {
                if (data == readl(drv->tcs_base + reg +
                                  RSC_DRV_TCS_OFFSET * tcs_id))
                        break;
                udelay(1);
        }
}

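/*
 * A TCS is free when the driver is not using it (its tcs_in_use bit is clear)
 * and the hardware reports it idle (RSC_DRV_STATUS reads non-zero once the
 * controller is done processing the TCS).
 */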
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
        return !test_bit(tcs_id, drv->tcs_in_use) &&
               read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
}

static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
{
        return &drv->tcs[type];
}

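/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv:  The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS.
 *
 * Clears the CMD_ENABLE and WAIT_FOR_CMPL registers of every TCS in the
 * group and resets the slot-tracking bitmap.
 *
 * Return: 0 on success, -EAGAIN if a TCS of the given type is still in use.
 */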
static int tcs_invalidate(struct rsc_drv *drv, int type)
{
        int m;
        struct tcs_group *tcs;

        tcs = get_tcs_of_type(drv, type);

        spin_lock(&tcs->lock);
        if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
                spin_unlock(&tcs->lock);
                return 0;
        }

        for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
                if (!tcs_is_free(drv, m)) {
                        spin_unlock(&tcs->lock);
                        return -EAGAIN;
                }
                write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
                write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
        }
        bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
        spin_unlock(&tcs->lock);

        return 0;
}

/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: the RSC controller
 *
 * Return: 0 on success, -EAGAIN if a sleep or wake TCS is still in use.
 */
int rpmh_rsc_invalidate(struct rsc_drv *drv)
{
        int ret;

        ret = tcs_invalidate(drv, SLEEP_TCS);
        if (!ret)
                ret = tcs_invalidate(drv, WAKE_TCS);

        return ret;
}

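/**
 * get_tcs_for_msg() - Get the TCS group used to handle the given message.
 * @drv: The RSC controller.
 * @msg: The message to be handled.
 *
 * Maps the request's state to a TCS type; active-only requests fall back to
 * the wake TCS group on controllers without dedicated active TCSes.
 *
 * Return: A pointer to a struct tcs_group, or an ERR_PTR on an invalid state.
 */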
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
                                         const struct tcs_request *msg)
{
        int type;
        struct tcs_group *tcs;

        switch (msg->state) {
        case RPMH_ACTIVE_ONLY_STATE:
                type = ACTIVE_TCS;
                break;
        case RPMH_WAKE_ONLY_STATE:
                type = WAKE_TCS;
                break;
        case RPMH_SLEEP_STATE:
                type = SLEEP_TCS;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        /*
         * If we are making an active request on an RSC that does not have a
         * dedicated TCS for active state use, then re-purpose a wake TCS to
         * send active votes.
         */
        tcs = get_tcs_of_type(drv, type);
        if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
                tcs = get_tcs_of_type(drv, WAKE_TCS);

        return tcs;
}

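/**
 * get_req_from_tcs() - Get the request that the given TCS is handling.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * Used by the interrupt handler to find out whom to notify.
 *
 * Return: The request associated with the TCS, or NULL if none.
 */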
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
                                                  int tcs_id)
{
        struct tcs_group *tcs;
        int i;

        for (i = 0; i < TCS_TYPE_NR; i++) {
                tcs = &drv->tcs[i];
                if (tcs->mask & BIT(tcs_id))
                        return tcs->req[tcs_id - tcs->offset];
        }

        return NULL;
}

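/**
 * __tcs_set_trigger() - Start or stop a transfer on the given TCS.
 * @drv:     The RSC controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true, start the transfer; if false, just de-assert the
 *           trigger so a repurposed wake TCS won't fire again.
 */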
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
        u32 enable;

        /*
         * HW requirement: clear DRV_CONTROL before enabling the TCS again.
         * While clearing, ensure that the AMC mode trigger is cleared first
         * and the mode enable is cleared after that.
         */
        enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
        enable &= ~TCS_AMC_MODE_TRIGGER;
        write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
        enable &= ~TCS_AMC_MODE_ENABLE;
        write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

        if (trigger) {
                /* Enable the AMC mode on the TCS and then trigger the TCS */
                enable = TCS_AMC_MODE_ENABLE;
                write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
                enable |= TCS_AMC_MODE_TRIGGER;
                write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
        }
}

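/**
 * enable_tcs_irq() - Enable or disable the TX-done interrupt for a TCS.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 * @enable: If true, enable the interrupt; if false, mask it.
 *
 * Toggles @tcs_id's bit in the controller-level RSC_DRV_IRQ_ENABLE register.
 * Only needed for a wake TCS borrowed for an active-only request; dedicated
 * active TCSes have their interrupts enabled once at probe.
 */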
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
        u32 data;

        data = read_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, 0);
        if (enable)
                data |= BIT(tcs_id);
        else
                data &= ~BIT(tcs_id);
        write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, data);
}

/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to the struct rsc_drv for this controller.
 *
 * Checks the command status of each finished TCS, reclaims the TCS, and
 * notifies the waiting client via rpmh_tx_done().
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
        struct rsc_drv *drv = p;
        int i, j, err = 0;
        unsigned long irq_status;
        const struct tcs_request *req;
        struct tcs_cmd *cmd;

        irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);

        for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
                req = get_req_from_tcs(drv, i);
                if (!req) {
                        WARN_ON(1);
                        goto skip;
                }

                err = 0;
                for (j = 0; j < req->num_cmds; j++) {
                        u32 sts;

                        cmd = &req->cmds[j];
                        sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
                        if (!(sts & CMD_STATUS_ISSUED) ||
                           ((req->wait_for_compl || cmd->wait) &&
                           !(sts & CMD_STATUS_COMPL))) {
                                pr_err("Incomplete request: %s: addr=%#x data=%#x",
                                       drv->name, cmd->addr, cmd->data);
                                err = -EIO;
                        }
                }

                trace_rpmh_tx_done(drv, i, req, err);

                /*
                 * If a wake TCS was re-purposed for sending active
                 * votes, clear the AMC trigger & enable modes and
                 * disable the interrupt for this TCS.
                 */
                if (!drv->tcs[ACTIVE_TCS].num_tcs)
                        __tcs_set_trigger(drv, i, false);
skip:
                /* Reclaim the TCS */
                write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
                write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
                write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
                spin_lock(&drv->lock);
                clear_bit(i, drv->tcs_in_use);
                /*
                 * Disable interrupt for WAKE TCS to avoid being
                 * spammed with interrupts coming when the solver
                 * sends its wake votes.
                 */
                if (!drv->tcs[ACTIVE_TCS].num_tcs)
                        enable_tcs_irq(drv, i, false);
                spin_unlock(&drv->lock);
                if (req)
                        rpmh_tx_done(req, err);
        }

        return IRQ_HANDLED;
}

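/**
 * __tcs_buffer_write() - Write a message into a TCS's command registers.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing at.
 * @msg:    The message to write.
 *
 * Programs MSGID/ADDR/DATA for each command, accumulates the
 * wait-for-completion bits, and finally enables the written command slots.
 * The caller is responsible for triggering the TCS if needed.
 */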
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
                               const struct tcs_request *msg)
{
        u32 msgid, cmd_msgid;
        u32 cmd_enable = 0;
        u32 cmd_complete;
        struct tcs_cmd *cmd;
        int i, j;

        cmd_msgid = CMD_MSGID_LEN;
        cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
        cmd_msgid |= CMD_MSGID_WRITE;

        cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);

        for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
                cmd = &msg->cmds[i];
                cmd_enable |= BIT(j);
                cmd_complete |= cmd->wait << j;
                msgid = cmd_msgid;
                msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

                write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
                write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
                write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
                trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
        }

        write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
        cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
        write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}

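/**
 * check_for_req_inflight() - Check if any address in @msg is already being
 * processed by this TCS group.
 * @drv: The RSC controller.
 * @tcs: The TCS group the message would go to.
 * @msg: The message to check against in-flight transfers.
 *
 * The hardware cannot handle two outstanding requests to the same address,
 * so the caller must retry later on -EBUSY.
 *
 * Return: 0 if nothing conflicts, -EBUSY if an address in @msg is in flight.
 */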
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
                                  const struct tcs_request *msg)
{
        unsigned long curr_enabled;
        u32 addr;
        int i, j, k;
        int tcs_id = tcs->offset;

        for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
                if (tcs_is_free(drv, tcs_id))
                        continue;

                curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);

                for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
                        addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
                        for (k = 0; k < msg->num_cmds; k++) {
                                if (addr == msg->cmds[k].addr)
                                        return -EBUSY;
                        }
                }
        }

        return 0;
}

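/**
 * find_free_tcs() - Find a free TCS in the given TCS group.
 * @tcs: The TCS group to search.
 *
 * Return: The global ID of a free TCS, or -EBUSY if all are in use.
 */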
static int find_free_tcs(struct tcs_group *tcs)
{
        int i;

        for (i = 0; i < tcs->num_tcs; i++) {
                if (tcs_is_free(tcs->drv, tcs->offset + i))
                        return tcs->offset + i;
        }

        return -EBUSY;
}

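/**
 * tcs_write() - Claim a TCS, write @msg into it, and trigger it.
 * @drv: The RSC controller.
 * @msg: The request to send immediately.
 *
 * Return: 0 on success, -EBUSY if no TCS was free or an address in @msg is
 * already in flight (caller should retry), or another negative errno.
 */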
static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
        struct tcs_group *tcs;
        int tcs_id;
        unsigned long flags;
        int ret;

        tcs = get_tcs_for_msg(drv, msg);
        if (IS_ERR(tcs))
                return PTR_ERR(tcs);

        spin_lock_irqsave(&tcs->lock, flags);
        spin_lock(&drv->lock);
        /*
         * The h/w does not like it if we send a request to the same address
         * when one is already in flight or being processed.
         */
        ret = check_for_req_inflight(drv, tcs, msg);
        if (ret) {
                spin_unlock(&drv->lock);
                goto done_write;
        }

        tcs_id = find_free_tcs(tcs);
        if (tcs_id < 0) {
                ret = tcs_id;
                spin_unlock(&drv->lock);
                goto done_write;
        }

        tcs->req[tcs_id - tcs->offset] = msg;
        set_bit(tcs_id, drv->tcs_in_use);
        if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
                /*
                 * Clear previously programmed WAKE commands in the selected
                 * repurposed TCS to avoid triggering them. tcs->slots will be
                 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate().
                 */
                write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
                write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
                enable_tcs_irq(drv, tcs_id, true);
        }
        spin_unlock(&drv->lock);

        __tcs_buffer_write(drv, tcs_id, 0, msg);
        __tcs_set_trigger(drv, tcs_id, true);

done_write:
        spin_unlock_irqrestore(&tcs->lock, flags);
        return ret;
}

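/*
 * Usage sketch (hypothetical caller; assumes "addr" was resolved via
 * cmd_db_read_addr() and "drv" is this controller's rsc_drv):
 *
 *	struct tcs_cmd cmd = { .addr = addr, .data = val, .wait = 1 };
 *	struct tcs_request req = {
 *		.state = RPMH_ACTIVE_ONLY_STATE,
 *		.wait_for_compl = true,
 *		.num_cmds = 1,
 *		.cmds = &cmd,
 *	};
 *	int ret = rpmh_rsc_send_data(drv, &req);
 */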
/**
 * rpmh_rsc_send_data() - Validate the incoming message and write it to the
 * appropriate TCS block.
 * @drv: the controller
 * @msg: the data to be sent
 *
 * NOTE: This call blocks until the message is written to a TCS; it
 * busy-waits and retries while all suitable TCSes are in use.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
        int ret;

        if (!msg || !msg->cmds || !msg->num_cmds ||
            msg->num_cmds > MAX_RPMH_PAYLOAD) {
                WARN_ON(1);
                return -EINVAL;
        }

        do {
                ret = tcs_write(drv, msg);
                if (ret == -EBUSY) {
                        pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
                                            msg->cmds[0].addr);
                        udelay(10);
                }
        } while (ret == -EBUSY);

        return ret;
}

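/**
 * find_match() - Search the cached sleep/wake command sequences for @cmd.
 * @tcs: The TCS group (sleep or wake) whose cache to search.
 * @cmd: The command sequence to look for.
 * @len: The number of commands in the sequence.
 *
 * Return: The slot index where the sequence is already cached, -ENODATA if
 * it is not cached, or -EINVAL if it only partially matches a cached one.
 */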
static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
                      int len)
{
        int i, j;

        /* Check for already cached commands */
        for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
                if (tcs->cmd_cache[i] != cmd[0].addr)
                        continue;
                if (i + len >= tcs->num_tcs * tcs->ncpt)
                        goto seq_err;
                for (j = 0; j < len; j++) {
                        if (tcs->cmd_cache[i + j] != cmd[j].addr)
                                goto seq_err;
                }
                return i;
        }

        return -ENODATA;

seq_err:
        WARN(1, "Message does not match previous sequence.\n");
        return -EINVAL;
}

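/**
 * find_slots() - Find a contiguous run of free command slots for @msg.
 * @tcs:    The sleep/wake TCS group to place the message in.
 * @msg:    The message to place.
 * @tcs_id: Filled in with the global ID of the chosen TCS.
 * @cmd_id: Filled in with the first command slot within that TCS.
 *
 * Reuses an already-cached matching sequence when possible; otherwise finds
 * a zero area in the slot bitmap that does not straddle a TCS boundary.
 *
 * Return: 0 on success, -ENOMEM if the message won't fit.
 */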
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
                      int *tcs_id, int *cmd_id)
{
        int slot, offset;
        int i = 0;

        /* Find if we already have the msg in our TCS */
        slot = find_match(tcs, msg->cmds, msg->num_cmds);
        if (slot >= 0)
                goto copy_data;

        /* Do over, until we can fit the full payload in a TCS */
        do {
                slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
                                                  i, msg->num_cmds, 0);
                if (slot == tcs->num_tcs * tcs->ncpt)
                        return -ENOMEM;
                i += tcs->ncpt;
        } while (slot + msg->num_cmds - 1 >= i);

copy_data:
        bitmap_set(tcs->slots, slot, msg->num_cmds);
        /* Copy the addresses of the resources over to the slots */
        for (i = 0; i < msg->num_cmds; i++)
                tcs->cmd_cache[slot + i] = msg->cmds[i].addr;

        offset = slot / tcs->ncpt;
        *tcs_id = offset + tcs->offset;
        *cmd_id = slot % tcs->ncpt;

        return 0;
}

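/**
 * tcs_ctrl_write() - Stage a sleep/wake request into its TCS slots.
 * @drv: The RSC controller.
 * @msg: The request to stage; it is not triggered here.
 *
 * Return: 0 on success, or a negative errno.
 */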
static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
        struct tcs_group *tcs;
        int tcs_id = 0, cmd_id = 0;
        unsigned long flags;
        int ret;

        tcs = get_tcs_for_msg(drv, msg);
        if (IS_ERR(tcs))
                return PTR_ERR(tcs);

        spin_lock_irqsave(&tcs->lock, flags);
        /* find the TCS id and the command in the TCS to write to */
        ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
        if (!ret)
                __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
        spin_unlock_irqrestore(&tcs->lock, flags);

        return ret;
}

/**
 * rpmh_rsc_write_ctrl_data() - Write a request to the controller.
 * @drv: the controller
 * @msg: the data to be written to the controller
 *
 * There is no response returned for writing the request to the controller.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
        if (!msg || !msg->cmds || !msg->num_cmds ||
            msg->num_cmds > MAX_RPMH_PAYLOAD) {
                pr_err("Payload error\n");
                return -EINVAL;
        }

        /* Data sent to this API will not be sent immediately */
        if (msg->state == RPMH_ACTIVE_ONLY_STATE)
                return -EINVAL;

        return tcs_ctrl_write(drv, msg);
}

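/**
 * rpmh_probe_tcs_config() - Parse the TCS layout from hardware and DT.
 * @pdev: The RSC controller's platform device.
 * @drv:  The driver instance to fill in.
 *
 * Reads DRV_PRNT_CHLD_CONFIG for the TCS count and commands-per-TCS, then
 * parses "qcom,tcs-config" to split the TCSes into typed groups, and
 * allocates the command cache for the sleep and wake groups.
 *
 * Return: 0 on success, or a negative errno on parse/allocation failure.
 */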
static int rpmh_probe_tcs_config(struct platform_device *pdev,
                                 struct rsc_drv *drv)
{
        struct tcs_type_config {
                u32 type;
                u32 n;
        } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
        struct device_node *dn = pdev->dev.of_node;
        u32 config, max_tcs, ncpt, offset;
        int i, ret, n, st = 0;
        struct tcs_group *tcs;
        struct resource *res;
        void __iomem *base;
        char drv_id[10] = {0};

        snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
        if (ret)
                return ret;
        drv->tcs_base = base + offset;

        config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);

        max_tcs = config;
        max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
        max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

        ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
        ncpt = ncpt >> DRV_NCPT_SHIFT;

        n = of_property_count_u32_elems(dn, "qcom,tcs-config");
        if (n != 2 * TCS_TYPE_NR)
                return -EINVAL;

        for (i = 0; i < TCS_TYPE_NR; i++) {
                ret = of_property_read_u32_index(dn, "qcom,tcs-config",
                                                 i * 2, &tcs_cfg[i].type);
                if (ret)
                        return ret;
                if (tcs_cfg[i].type >= TCS_TYPE_NR)
                        return -EINVAL;

                ret = of_property_read_u32_index(dn, "qcom,tcs-config",
                                                 i * 2 + 1, &tcs_cfg[i].n);
                if (ret)
                        return ret;
                if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
                        return -EINVAL;
        }

        for (i = 0; i < TCS_TYPE_NR; i++) {
                tcs = &drv->tcs[tcs_cfg[i].type];
                if (tcs->drv)
                        return -EINVAL;
                tcs->drv = drv;
                tcs->type = tcs_cfg[i].type;
                tcs->num_tcs = tcs_cfg[i].n;
                tcs->ncpt = ncpt;
                spin_lock_init(&tcs->lock);

                if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
                        continue;

                if (st + tcs->num_tcs > max_tcs ||
                    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
                        return -EINVAL;

                tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
                tcs->offset = st;
                st += tcs->num_tcs;

                /*
                 * Allocate memory to cache sleep and wake requests to
                 * avoid reading TCS register memory.
                 */
                if (tcs->type == ACTIVE_TCS)
                        continue;

                tcs->cmd_cache = devm_kcalloc(&pdev->dev,
                                              tcs->num_tcs * ncpt, sizeof(u32),
                                              GFP_KERNEL);
                if (!tcs->cmd_cache)
                        return -ENOMEM;
        }

        drv->num_tcs = st;

        return 0;
}

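/**
 * rpmh_rsc_probe() - Probe an RSC controller and populate child devices.
 * @pdev: The platform device.
 *
 * Return: 0 on success, or a negative errno (including -EPROBE_DEFER while
 * cmd-db is not yet available).
 */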
static int rpmh_rsc_probe(struct platform_device *pdev)
{
        struct device_node *dn = pdev->dev.of_node;
        struct rsc_drv *drv;
        int ret, irq;

        /*
         * Even though RPMh doesn't directly use cmd-db, all of its children
         * do. To avoid adding this check to our children we'll do it now.
         */
        ret = cmd_db_ready();
        if (ret) {
                if (ret != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Command DB not available (%d)\n",
                                                                        ret);
                return ret;
        }

        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;

        ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
        if (ret)
                return ret;

        drv->name = of_get_property(dn, "label", NULL);
        if (!drv->name)
                drv->name = dev_name(&pdev->dev);

        ret = rpmh_probe_tcs_config(pdev, drv);
        if (ret)
                return ret;

        spin_lock_init(&drv->lock);
        bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

        irq = platform_get_irq(pdev, drv->id);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
                               IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
                               drv->name, drv);
        if (ret)
                return ret;

        /* Enable the active TCS to send requests immediately */
        write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);

        spin_lock_init(&drv->client.cache_lock);
        INIT_LIST_HEAD(&drv->client.cache);
        INIT_LIST_HEAD(&drv->client.batch_cache);

        dev_set_drvdata(&pdev->dev, drv);

        return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id rpmh_drv_match[] = {
        { .compatible = "qcom,rpmh-rsc", },
        { }
};

static struct platform_driver rpmh_driver = {
        .probe = rpmh_rsc_probe,
        .driver = {
                  .name = "rpmh",
                  .of_match_table = rpmh_drv_match,
                  .suppress_bind_attrs = true,
        },
};

static int __init rpmh_driver_init(void)
{
        return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);