GNU Linux-libre 4.19.211-gnu1
drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "msgqueue.h"
#include <engine/falcon.h>

#include <subdev/secboot.h>

#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
#define QUEUE_ALIGNMENT 4
/* max size of the messages we can receive */
#define MSG_BUF_SIZE 128

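/*
 * Queues live in the falcon's DMEM and are tracked by a pair of head/tail
 * registers: message queues are filled by the falcon and drained by the
 * host, command queues the other way around. The helpers below read and
 * advance our side's position under the queue mutex, and only commit it to
 * the hardware register on a successful close.
 */
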
static int
msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
        struct nvkm_falcon *falcon = priv->falcon;

        mutex_lock(&queue->mutex);

        queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);

        return 0;
}

static void
msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
                bool commit)
{
        struct nvkm_falcon *falcon = priv->falcon;

        if (commit)
                nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);

        mutex_unlock(&queue->mutex);
}

static bool
msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
        struct nvkm_falcon *falcon = priv->falcon;
        u32 head, tail;

        head = nvkm_falcon_rd32(falcon, queue->head_reg);
        tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

        return head == tail;
}

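/*
 * Pop up to @size bytes from the message queue into @data. The queue is a
 * ring buffer: a head pointer behind our current read position means the
 * writer has wrapped around, so the read position is rewound to the start
 * of the queue before computing how much data is available.
 */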
static int
msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
              void *data, u32 size)
{
        struct nvkm_falcon *falcon = priv->falcon;
        const struct nvkm_subdev *subdev = priv->falcon->owner;
        u32 head, tail, available;

        head = nvkm_falcon_rd32(falcon, queue->head_reg);
        /* has the buffer looped? */
        if (head < queue->position)
                queue->position = queue->offset;

        tail = queue->position;

        available = head - tail;

        if (available == 0) {
                nvkm_warn(subdev, "no message data available\n");
                return 0;
        }

        if (size > available) {
                nvkm_warn(subdev, "message data smaller than read request\n");
                size = available;
        }

        nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
        queue->position += ALIGN(size, QUEUE_ALIGNMENT);

        return size;
}

static int
msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
               struct nvkm_msgqueue_hdr *hdr)
{
        const struct nvkm_subdev *subdev = priv->falcon->owner;
        int err;

        err = msg_queue_open(priv, queue);
        if (err) {
                nvkm_error(subdev, "failed to open queue %d\n", queue->index);
                return err;
        }

        if (msg_queue_empty(priv, queue)) {
                err = 0;
                goto close;
        }

        err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
        if (err >= 0 && err != HDR_SIZE)
                err = -EINVAL;
        if (err < 0) {
                nvkm_error(subdev, "failed to read message header: %d\n", err);
                goto close;
        }

        if (hdr->size > MSG_BUF_SIZE) {
                nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
                err = -ENOSPC;
                goto close;
        }

        if (hdr->size > HDR_SIZE) {
                u32 read_size = hdr->size - HDR_SIZE;

                err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
                if (err >= 0 && err != read_size)
                        err = -EINVAL;
                if (err < 0) {
                        nvkm_error(subdev, "failed to read message: %d\n", err);
                        goto close;
                }
        }

close:
        msg_queue_close(priv, queue, (err >= 0));

        return err;
}

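/*
 * Check whether the command queue has room for a command of @size bytes.
 * Free space is measured from the current head up to the end of the ring;
 * if the command does not fit there, the write position must first be
 * rewound to the start of the queue (reported through *rewind), and what
 * remains is the space before the tail pointer.
 */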
static bool
cmd_queue_has_room(struct nvkm_msgqueue *priv,
                   struct nvkm_msgqueue_queue *queue, u32 size, bool *rewind)
{
        struct nvkm_falcon *falcon = priv->falcon;
        u32 head, tail, free;

        size = ALIGN(size, QUEUE_ALIGNMENT);

        head = nvkm_falcon_rd32(falcon, queue->head_reg);
        tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

        if (head >= tail) {
                free = queue->offset + queue->size - head;
                free -= HDR_SIZE;

                if (size > free) {
                        *rewind = true;
                        head = queue->offset;
                }
        }

        if (head < tail)
                free = tail - head - 1;

        return size <= free;
}

static int
cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
               void *data, u32 size)
{
        nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
        queue->position += ALIGN(size, QUEUE_ALIGNMENT);

        return 0;
}

/* REWIND unit is always 0x00 */
#define MSGQUEUE_UNIT_REWIND 0x00

static void
cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
        const struct nvkm_subdev *subdev = priv->falcon->owner;
        struct nvkm_msgqueue_hdr cmd;
        int err;

        cmd.unit_id = MSGQUEUE_UNIT_REWIND;
        cmd.size = sizeof(cmd);
        err = cmd_queue_push(priv, queue, &cmd, cmd.size);
        if (err)
                nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
        else
                nvkm_debug(subdev, "queue %d rewound\n", queue->index);

        queue->position = queue->offset;
}

static int
cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
               u32 size)
{
        struct nvkm_falcon *falcon = priv->falcon;
        const struct nvkm_subdev *subdev = priv->falcon->owner;
        bool rewind = false;

        mutex_lock(&queue->mutex);

        if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
                nvkm_error(subdev, "queue full\n");
                mutex_unlock(&queue->mutex);
                return -EAGAIN;
        }

        queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);

        if (rewind)
                cmd_queue_rewind(priv, queue);

        return 0;
}

static void
cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
                bool commit)
{
        struct nvkm_falcon *falcon = priv->falcon;

        if (commit)
                nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);

        mutex_unlock(&queue->mutex);
}

static int
cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
          struct nvkm_msgqueue_queue *queue)
{
        const struct nvkm_subdev *subdev = priv->falcon->owner;
        static unsigned timeout = 2000;
        unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
        int ret = -EAGAIN;
        bool commit = true;

        while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
                ret = cmd_queue_open(priv, queue, cmd->size);
        if (ret) {
                nvkm_error(subdev, "cmd_queue_open failed\n");
                return ret;
        }

        ret = cmd_queue_push(priv, queue, cmd, cmd->size);
        if (ret) {
                nvkm_error(subdev, "cmd_queue_push failed\n");
                commit = false;
        }

        cmd_queue_close(priv, queue, commit);

        return ret;
}

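/*
 * Every in-flight command is tracked by a sequence, identified by the seq_id
 * carried in the command and message headers. It is used to match the
 * falcon's answer message to the command that triggered it, and to run the
 * caller's callback and/or completion at that point.
 */
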
static struct nvkm_msgqueue_seq *
msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
{
        const struct nvkm_subdev *subdev = priv->falcon->owner;
        struct nvkm_msgqueue_seq *seq;
        u32 index;

        mutex_lock(&priv->seq_lock);

        index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);

        if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
                nvkm_error(subdev, "no free sequence available\n");
                mutex_unlock(&priv->seq_lock);
                return ERR_PTR(-EAGAIN);
        }

        set_bit(index, priv->seq_tbl);

        mutex_unlock(&priv->seq_lock);

        seq = &priv->seq[index];
        seq->state = SEQ_STATE_PENDING;

        return seq;
}

static void
msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
{
        /* no need to acquire seq_lock since clear_bit is atomic */
        seq->state = SEQ_STATE_FREE;
        seq->callback = NULL;
        seq->completion = NULL;
        clear_bit(seq->id, priv->seq_tbl);
}

/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)

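/*
 * nvkm_msgqueue_post() - post a command to a falcon command queue.
 *
 * Acquire a free sequence to track the command, tag the command header with
 * its sequence ID and control flags, and write it to the command queue of
 * the requested priority. If @wait_init is set, wait (up to 1s) for the
 * falcon's INIT message first. The answer message is handled asynchronously
 * by nvkm_msgqueue_process_msgs(), which runs @cb and completes @completion.
 *
 * A typical caller (sketch, assuming a command structure whose first member
 * is the header) looks like:
 *
 *      struct completion completion;
 *
 *      init_completion(&completion);
 *      ret = nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
 *                               callback, &completion, true);
 *      if (!ret)
 *              wait_for_completion(&completion);
 */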
int
nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
                   struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
                   struct completion *completion, bool wait_init)
{
        struct nvkm_msgqueue_seq *seq;
        struct nvkm_msgqueue_queue *queue;
        int ret;

        if (wait_init &&
            !wait_for_completion_timeout(&priv->init_done,
                                         msecs_to_jiffies(1000)))
                return -ETIMEDOUT;

        queue = priv->func->cmd_queue(priv, prio);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        seq = msgqueue_seq_acquire(priv);
        if (IS_ERR(seq))
                return PTR_ERR(seq);

        cmd->seq_id = seq->id;
        cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;

        seq->callback = cb;
        seq->state = SEQ_STATE_USED;
        seq->completion = completion;

        ret = cmd_write(priv, cmd, queue);
        if (ret) {
                seq->state = SEQ_STATE_PENDING;
                msgqueue_seq_release(priv, seq);
        }

        return ret;
}

static int
msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
{
        const struct nvkm_subdev *subdev = priv->falcon->owner;
        struct nvkm_msgqueue_seq *seq;

        seq = &priv->seq[hdr->seq_id];
        if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
                nvkm_error(subdev, "msg for unknown sequence %d\n", seq->id);
                return -EINVAL;
        }

        if (seq->state == SEQ_STATE_USED) {
                if (seq->callback)
                        seq->callback(priv, hdr);
        }

        if (seq->completion)
                complete(seq->completion);

        msgqueue_seq_release(priv, seq);

        return 0;
}

static int
msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
                         struct nvkm_msgqueue_hdr *hdr)
{
        struct nvkm_falcon *falcon = priv->falcon;
        const struct nvkm_subdev *subdev = falcon->owner;
        u32 tail;
        u32 tail_reg;

        /*
         * Of course the message queue registers vary depending on the falcon
         * used...
         */
        switch (falcon->owner->index) {
        case NVKM_SUBDEV_PMU:
                tail_reg = 0x4cc;
                break;
        case NVKM_ENGINE_SEC2:
                tail_reg = 0xa34;
                break;
        default:
                nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
                           nvkm_subdev_name[falcon->owner->index]);
                return -EINVAL;
        }

        /*
         * Read the message - queues are not initialized yet so we cannot rely
         * on msg_queue_read()
         */
        tail = nvkm_falcon_rd32(falcon, tail_reg);
        nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);

        if (hdr->size > MSG_BUF_SIZE) {
                nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
                return -ENOSPC;
        }

        nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
                              (hdr + 1));

        tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
        nvkm_falcon_wr32(falcon, tail_reg, tail);

        return priv->func->init_func->init_callback(priv, hdr);
}

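/*
 * Handle the messages pending in @queue. Called from a worker thread once
 * the falcon has signaled that messages are waiting. The very first message
 * ever received must be the INIT message, which is handled specially since
 * the queues are not set up yet at that point.
 */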
void
nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
                           struct nvkm_msgqueue_queue *queue)
{
        /*
         * We are invoked from a worker thread, so normally we have plenty of
         * stack space to work with.
         */
        u8 msg_buffer[MSG_BUF_SIZE];
        struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
        int ret;

        /* the first message we receive must be the init message */
        if (!priv->init_msg_received) {
                ret = msgqueue_handle_init_msg(priv, hdr);
                if (!ret)
                        priv->init_msg_received = true;
        } else {
                while (msg_queue_read(priv, queue, hdr) > 0)
                        msgqueue_msg_handle(priv, hdr);
        }
}

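/*
 * Write the firmware's init arguments ("command line") into @buf by
 * deferring to the firmware-specific gen_cmdline() implementation; a no-op
 * if the queue or its init functions are not set.
 */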
void
nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
        if (!queue || !queue->func || !queue->func->init_func)
                return;

        queue->func->init_func->gen_cmdline(queue, buf);
}

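/*
 * Ask the ACR firmware to boot the falcons set in @falcon_mask (bits indexed
 * by NVKM_SECBOOT_FALCON_*). Use the firmware's multiple-falcon command when
 * available, else boot each requested falcon individually.
 */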
int
nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
                               unsigned long falcon_mask)
{
        unsigned long falcon;

        if (!queue || !queue->func->acr_func)
                return -ENODEV;

        /* Does the firmware support booting multiple falcons? */
        if (queue->func->acr_func->boot_multiple_falcons)
                return queue->func->acr_func->boot_multiple_falcons(queue,
                                                                    falcon_mask);

        /* Else boot all requested falcons individually */
        if (!queue->func->acr_func->boot_falcon)
                return -ENODEV;

        for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
                int ret = queue->func->acr_func->boot_falcon(queue, falcon);

                if (ret)
                        return ret;
        }

        return 0;
}

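/*
 * Create the msgqueue instance matching @version, since command and message
 * formats differ between firmware releases.
 */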
int
nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
                  const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
{
        const struct nvkm_subdev *subdev = falcon->owner;
        int ret = -EINVAL;

        switch (version) {
        case 0x0137c63d:
                ret = msgqueue_0137c63d_new(falcon, sb, queue);
                break;
        case 0x0137bca5:
                ret = msgqueue_0137bca5_new(falcon, sb, queue);
                break;
        case 0x0148cdec:
        case 0x015ccf3e:
        case 0x0167d263:
                ret = msgqueue_0148cdec_new(falcon, sb, queue);
                break;
        default:
                nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
                           version);
                break;
        }

        if (ret == 0) {
                nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
                (*queue)->fw_version = version;
        }

        return ret;
}

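/* Destroy a msgqueue instance and clear the caller's pointer. */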
void
nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
{
        if (*queue) {
                (*queue)->func->dtor(*queue);
                *queue = NULL;
        }
}

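/*
 * Dispatch reception to the firmware-specific implementation; typically
 * called when the falcon signals that messages have been posted.
 */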
void
nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
{
        if (!queue->func || !queue->func->recv) {
                const struct nvkm_subdev *subdev = queue->falcon->owner;

                nvkm_warn(subdev, "missing msgqueue recv function\n");
                return;
        }

        queue->func->recv(queue);
}

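/*
 * Reset the init-message bookkeeping before the falcon is (re)booted, so the
 * INIT message of the fresh firmware will be waited for and processed again.
 */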
int
nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
{
        /* firmware not set yet... */
        if (!queue)
                return 0;

        queue->init_msg_received = false;
        reinit_completion(&queue->init_done);

        return 0;
}

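/*
 * Initialize the parts of a msgqueue common to all firmware versions; called
 * by the version-specific constructors.
 */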
void
nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
                   struct nvkm_falcon *falcon,
                   struct nvkm_msgqueue *queue)
{
        int i;

        queue->func = func;
        queue->falcon = falcon;
        mutex_init(&queue->seq_lock);
        for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
                queue->seq[i].id = i;

        init_completion(&queue->init_done);
}