// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer Timing queue handling
 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999  Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When the owner client is deleted, all owned queues are deleted, too.
 *     - The owner of an unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - The owner field in the SET_QUEUE_OWNER ioctl must be identical with
 *       the caller client, i.e. changing the owner to a third client is not
 *       allowed.
 *
 *   Aug. 30, 2000  Takashi Iwai
 *     - Queues are managed in a static array again, but in a better way.
 *       The API itself is identical.
 *     - The queue is locked when a struct snd_seq_queue pointer is returned
 *       via queueptr().  This pointer *MUST* be released afterwards by
 *       queuefree().
 *     - Addition of experimental sync support.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_timer.h"
#include "seq_info.h"
/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;
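
/* Both queue_list[] and num_queues are only updated under queue_list_lock. */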

int snd_seq_queue_get_cur_queues(void)
{
        return num_queues;
}

/*----------------------------------------------------------------*/

/* assign a queue id and insert the queue into the list */
static int queue_list_add(struct snd_seq_queue *q)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (!queue_list[i]) {
                        queue_list[i] = q;
                        q->queue = i;
                        num_queues++;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return -1;
}

/* remove the queue from the list, only if it is owned by the given client */
static struct snd_seq_queue *queue_list_remove(int id, int client)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[id];
        if (q) {
                spin_lock(&q->owner_lock);
                if (q->owner == client) {
                        /* found; kernel-lock it so no other client grabs it */
                        q->klocked = 1;
                        spin_unlock(&q->owner_lock);
                        queue_list[id] = NULL;
                        num_queues--;
                        spin_unlock_irqrestore(&queue_list_lock, flags);
                        return q;
                }
                spin_unlock(&q->owner_lock);
        }
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return NULL;
}

/*----------------------------------------------------------------*/

/* create new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
        struct snd_seq_queue *q;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (q == NULL)
                return NULL;

        spin_lock_init(&q->owner_lock);
        spin_lock_init(&q->check_lock);
        mutex_init(&q->timer_mutex);
        snd_use_lock_init(&q->use_lock);
        q->queue = -1;

        q->tickq = snd_seq_prioq_new();
        q->timeq = snd_seq_prioq_new();
        q->timer = snd_seq_timer_new();
        if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
                snd_seq_prioq_delete(&q->tickq);
                snd_seq_prioq_delete(&q->timeq);
                snd_seq_timer_delete(&q->timer);
                kfree(q);
                return NULL;
        }

        q->owner = owner;
        q->locked = locked;
        q->klocked = 0;
        return q;
}
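
/*
 * Note: the error path above relies on snd_seq_prioq_delete() and
 * snd_seq_timer_delete() accepting a pointer whose target may already be
 * NULL, so a partially failed allocation can be torn down unconditionally
 * (an assumption about those helpers, not something enforced here).
 */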

/* delete queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
        /* stop and release the timer */
        mutex_lock(&q->timer_mutex);
        snd_seq_timer_stop(q->timer);
        snd_seq_timer_close(q);
        mutex_unlock(&q->timer_mutex);
        /* wait until all pending accesses are finished */
        snd_use_lock_sync(&q->use_lock);
        /* release resources... */
        snd_seq_prioq_delete(&q->tickq);
        snd_seq_prioq_delete(&q->timeq);
        snd_seq_timer_delete(&q->timer);

        kfree(q);
}

/*----------------------------------------------------------------*/

/* delete all existing queues */
void snd_seq_queues_delete(void)
{
        int i;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (queue_list[i])
                        queue_delete(queue_list[i]);
        }
}

static void queue_use(struct snd_seq_queue *queue, int client, int use);

/* allocate a new queue -
 * return pointer to new queue or ERR_PTR(-errno) for error
 * The new queue's use_lock is set to 1. It is the caller's responsibility to
 * call snd_use_lock_free(&q->use_lock).
 */
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
        struct snd_seq_queue *q;

        q = queue_new(client, locked);
        if (q == NULL)
                return ERR_PTR(-ENOMEM);
        q->info_flags = info_flags;
        queue_use(q, client, 1);
        snd_use_lock_use(&q->use_lock);
        if (queue_list_add(q) < 0) {
                snd_use_lock_free(&q->use_lock);
                queue_delete(q);
                return ERR_PTR(-ENOMEM);
        }
        return q;
}
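
/*
 * Typical caller pattern (an illustrative sketch only, not code from this
 * file):
 *
 *      q = snd_seq_queue_alloc(client, locked, info_flags);
 *      if (IS_ERR(q))
 *              return PTR_ERR(q);
 *      ... use q->queue as the newly assigned queue id ...
 *      snd_use_lock_free(&q->use_lock);
 */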

/* delete a queue - the queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
        struct snd_seq_queue *q;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return -EINVAL;
        q = queue_list_remove(queueid, client);
        if (q == NULL)
                return -EINVAL;
        queue_delete(q);

        return 0;
}

/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
        struct snd_seq_queue *q;
        unsigned long flags;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return NULL;
        spin_lock_irqsave(&queue_list_lock, flags);
        q = queue_list[queueid];
        if (q)
                snd_use_lock_use(&q->use_lock);
        spin_unlock_irqrestore(&queue_list_lock, flags);
        return q;
}

/* return the (first) queue matching the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) != NULL) {
                        if (strncmp(q->name, name, sizeof(q->name)) == 0)
                                return q;
                        queuefree(q);
                }
        }
        return NULL;
}

/* -------------------------------------------------------- */
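
/*
 * Each call to snd_seq_check_queue() dispatches at most this many cells in
 * one batch; whatever is left over is picked up at the next check, so a
 * flood of expired events cannot monopolize the dispatcher.
 */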
#define MAX_CELL_PROCESSES_IN_QUEUE     1000
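
/*
 * Dispatch expired cells of the queue.  The function is made non-reentrant
 * per queue via check_blocked: a concurrent caller merely sets check_again
 * and returns, while the thread already running the check loops once more
 * before clearing check_blocked, so no newly expired cell is left behind.
 */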
void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
        unsigned long flags;
        struct snd_seq_event_cell *cell;
        snd_seq_tick_time_t cur_tick;
        snd_seq_real_time_t cur_time;
        int processed = 0;

        if (q == NULL)
                return;

        /* make this function non-reentrant */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_blocked) {
                q->check_again = 1;
                spin_unlock_irqrestore(&q->check_lock, flags);
                return;         /* other thread is already checking queues */
        }
        q->check_blocked = 1;
        spin_unlock_irqrestore(&q->check_lock, flags);

__again:
        /* Process tick queue... */
        cur_tick = snd_seq_timer_get_cur_tick(q->timer);
        for (;;) {
                cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
                if (!cell)
                        break;
                snd_seq_dispatch_event(cell, atomic, hop);
                if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
                        goto out; /* the rest is processed at the next batch */
        }

        /* Process time queue... */
        cur_time = snd_seq_timer_get_cur_time(q->timer, false);
        for (;;) {
                cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
                if (!cell)
                        break;
                snd_seq_dispatch_event(cell, atomic, hop);
                if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
                        goto out; /* the rest is processed at the next batch */
        }

out:
        /* free lock */
        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_again) {
                q->check_again = 0;
                if (processed < MAX_CELL_PROCESSES_IN_QUEUE) {
                        spin_unlock_irqrestore(&q->check_lock, flags);
                        goto __again;
                }
        }
        q->check_blocked = 0;
        spin_unlock_irqrestore(&q->check_lock, flags);
}

/* enqueue an event to a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
        int dest, err;
        struct snd_seq_queue *q;

        if (snd_BUG_ON(!cell))
                return -EINVAL;
        dest = cell->event.queue;       /* destination queue */
        q = queueptr(dest);
        if (q == NULL)
                return -EINVAL;
        /* handle relative time stamps, convert them into absolute */
        if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
                switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
                case SNDRV_SEQ_TIME_STAMP_TICK:
                        cell->event.time.tick += q->timer->tick.cur_tick;
                        break;

                case SNDRV_SEQ_TIME_STAMP_REAL:
                        snd_seq_inc_real_time(&cell->event.time.time,
                                              &q->timer->cur_time);
                        break;
                }
                cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
                cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
        }
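
        /*
         * Example (illustrative numbers only): if the queue is currently at
         * tick 480, a relative event carrying time.tick = 96 leaves the
         * block above as an absolute event scheduled for tick 576.
         */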

        /* enqueue event in the real-time or midi queue */
        switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
        case SNDRV_SEQ_TIME_STAMP_TICK:
                err = snd_seq_prioq_cell_in(q->tickq, cell);
                break;

        case SNDRV_SEQ_TIME_STAMP_REAL:
        default:
                err = snd_seq_prioq_cell_in(q->timeq, cell);
                break;
        }

        if (err < 0) {
                queuefree(q); /* unlock */
                return err;
        }

        /* trigger dispatching */
        snd_seq_check_queue(q, atomic, hop);

        queuefree(q); /* unlock */

        return 0;
}

/*----------------------------------------------------------------*/

static inline int check_access(struct snd_seq_queue *q, int client)
{
        return (q->owner == client) || (!q->locked && !q->klocked);
}
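
/*
 * A queue may always be modified by its owner; any other client passes the
 * check only while the queue is neither explicitly locked (q->locked) nor
 * temporarily kernel-locked (q->klocked).
 */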

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
        unsigned long flags;
        int access_ok;

        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        if (access_ok)
                q->klocked = 1;
        spin_unlock_irqrestore(&q->owner_lock, flags);
        return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->owner_lock, flags);
        q->klocked = 0;
        spin_unlock_irqrestore(&q->owner_lock, flags);
}

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int access_ok;
        unsigned long flags;

        if (q == NULL)
                return 0;
        spin_lock_irqsave(&q->owner_lock, flags);
        access_ok = check_access(q, client);
        spin_unlock_irqrestore(&q->owner_lock, flags);
        queuefree(q);
        return access_ok;
}

/*----------------------------------------------------------------*/

/*
 * change the queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
        struct snd_seq_queue *q = queueptr(queueid);
        unsigned long flags;

        if (q == NULL)
                return -EINVAL;

        if (!queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        spin_lock_irqsave(&q->owner_lock, flags);
        q->locked = locked ? 1 : 0;
        q->owner = client;
        spin_unlock_irqrestore(&q->owner_lock, flags);
        queue_access_unlock(q);
        queuefree(q);

        return 0;
}

/*----------------------------------------------------------------*/

/* open the queue timer -
 * q->use mutex should be held before calling this function to avoid
 * conflicts with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
        int result = 0;
        struct snd_seq_queue *queue;
        struct snd_seq_timer *tmr;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        tmr = queue->timer;
        if ((result = snd_seq_timer_open(queue)) < 0) {
                snd_seq_timer_defaults(tmr);
                result = snd_seq_timer_open(queue);
        }
        queuefree(queue);
        return result;
}

/* close the queue timer -
 * q->use mutex should be held before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
        struct snd_seq_queue *queue;
        int result = 0;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        snd_seq_timer_close(queue);
        queuefree(queue);
        return result;
}

/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
                                  struct snd_seq_queue_tempo *info)
{
        struct snd_seq_queue *q = queueptr(queueid);
        int result;

        if (q == NULL)
                return -EINVAL;
        if (!queue_access_lock(q, client)) {
                queuefree(q);
                return -EPERM;
        }

        result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
        if (result >= 0 && info->skew_base > 0)
                result = snd_seq_timer_set_skew(q->timer, info->skew_value,
                                                info->skew_base);
        queue_access_unlock(q);
        queuefree(q);
        return result;
}

/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
        if (use) {
                if (!test_and_set_bit(client, queue->clients_bitmap))
                        queue->clients++;
        } else {
                if (test_and_clear_bit(client, queue->clients_bitmap))
                        queue->clients--;
        }
        if (queue->clients) {
                if (use && queue->clients == 1)
                        snd_seq_timer_defaults(queue->timer);
                snd_seq_timer_open(queue);
        } else {
                snd_seq_timer_close(queue);
        }
}
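
/*
 * Note: queue_use() above updates clients_bitmap/clients and opens or closes
 * the queue timer accordingly; the first user also gets the timer defaults
 * applied.  Callers are expected to hold q->timer_mutex, as
 * snd_seq_queue_use() below does.
 */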

/* use or unuse this queue -
 * if it is the first client, start the timer.
 * if it is no longer used by any client, stop the timer.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
        struct snd_seq_queue *queue;

        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
        mutex_lock(&queue->timer_mutex);
        queue_use(queue, client, use);
        mutex_unlock(&queue->timer_mutex);
        queuefree(queue);
        return 0;
}

/*
 * check if the queue is used by the client
 * return a negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
        struct snd_seq_queue *q;
        int result;

        q = queueptr(queueid);
        if (q == NULL)
                return -EINVAL; /* invalid queue */
        result = test_bit(client, q->clients_bitmap) ? 1 : 0;
        queuefree(q);
        return result;
}

/*----------------------------------------------------------------*/

/* notification that a client has left the system -
 * stop the timer on all queues owned by this client
 */
void snd_seq_queue_client_termination(int client)
{
        unsigned long flags;
        int i;
        struct snd_seq_queue *q;
        bool matched;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                spin_lock_irqsave(&q->owner_lock, flags);
                matched = (q->owner == client);
                if (matched)
                        q->owner = -1;
                spin_unlock_irqrestore(&q->owner_lock, flags);
                if (matched) {
                        if (q->timer->running)
                                snd_seq_timer_stop(q->timer);
                        snd_seq_timer_reset(q->timer);
                }
                queuefree(q);
        }
}

/* final stage notification -
 * remove cells belonging to the no-longer-existing client (for non-owned queues)
 * or delete the queue entirely (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
        int i;
        struct snd_seq_queue *q;

        /* delete own queues from queue list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queue_list_remove(i, client)) != NULL)
                        queue_delete(q);
        }

        /* remove cells from existing queues -
         * they are not owned by this client
         */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                if (test_bit(client, q->clients_bitmap)) {
                        snd_seq_prioq_leave(q->tickq, client, 0);
                        snd_seq_prioq_leave(q->timeq, client, 0);
                        snd_seq_queue_use(q->queue, client, 0);
                }
                queuefree(q);
        }
}

/*----------------------------------------------------------------*/

/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                snd_seq_prioq_leave(q->tickq, client, 0);
                snd_seq_prioq_leave(q->timeq, client, 0);
                queuefree(q);
        }
}

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
        int i;
        struct snd_seq_queue *q;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;
                if (test_bit(client, q->clients_bitmap) &&
                    (!(info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
                     q->queue == info->queue)) {
                        snd_seq_prioq_remove_events(q->tickq, client, info);
                        snd_seq_prioq_remove_events(q->timeq, client, info);
                }
                queuefree(q);
        }
}

/*----------------------------------------------------------------*/
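
/*
 * Queue-control events (start/stop/continue, tempo, position and skew
 * changes) are echoed from the System Timer port to all of its subscribers,
 * so interested clients can follow the queue state; see
 * queue_broadcast_event() and snd_seq_queue_process_event() below.
 */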

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
                                  int atomic, int hop)
{
        struct snd_seq_event sev;

        if (!(ev->flags & SNDRV_SEQ_EVENT_LENGTH_FIXED))
                return; /* no broadcast of variable-length events */

        sev = *ev;
        sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
        sev.time.tick = q->timer->tick.cur_tick;
        sev.queue = q->queue;
        sev.data.queue.queue = q->queue;

        /* broadcast events from Timer port */
        sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
        sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
        sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
        snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
                                        struct snd_seq_event *ev,
                                        int atomic, int hop)
{
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_START:
                snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
                snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
                if (!snd_seq_timer_start(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_CONTINUE:
                if (!snd_seq_timer_continue(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_STOP:
                snd_seq_timer_stop(q->timer);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_TEMPO:
                snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TICK:
                if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0)
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TIME:
                if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0)
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_QUEUE_SKEW:
                if (snd_seq_timer_set_skew(q->timer,
                                           ev->data.queue.param.skew.value,
                                           ev->data.queue.param.skew.base) == 0)
                        queue_broadcast_event(q, ev, atomic, hop);
                break;
        }
}

/*
 * Queue control via the timer control port:
 * this function is exported as a callback of the timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
        struct snd_seq_queue *q;

        if (snd_BUG_ON(!ev))
                return -EINVAL;
        q = queueptr(ev->data.queue.queue);
        if (q == NULL)
                return -EINVAL;

        if (!queue_access_lock(q, ev->source.client)) {
                queuefree(q);
                return -EPERM;
        }

        snd_seq_queue_process_event(q, ev, atomic, hop);

        queue_access_unlock(q);
        queuefree(q);
        return 0;
}

/*----------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/* exported to seq_info.c */
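/* Tempo is kept in the MIDI convention of microseconds per quarter note,
 * hence BPM = 60,000,000 / tempo in the output below.
 */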
void snd_seq_info_queues_read(struct snd_info_entry *entry,
                              struct snd_info_buffer *buffer)
{
        int i, bpm;
        struct snd_seq_queue *q;
        struct snd_seq_timer *tmr;
        bool locked;
        int owner;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if ((q = queueptr(i)) == NULL)
                        continue;

                tmr = q->timer;
                if (tmr->tempo)
                        bpm = 60000000 / tmr->tempo;
                else
                        bpm = 0;

                spin_lock_irq(&q->owner_lock);
                locked = q->locked;
                owner = q->owner;
                spin_unlock_irq(&q->owner_lock);

                snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
                snd_iprintf(buffer, "owned by client    : %d\n", owner);
                snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
                snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
                snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
                snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
                snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
                snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
                snd_iprintf(buffer, "current BPM        : %d\n", bpm);
                snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
                snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
                snd_iprintf(buffer, "\n");
                queuefree(q);
        }
}
#endif /* CONFIG_SND_PROC_FS */