1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Digital Audio (PCM) abstract layer
4 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
8 #include <linux/module.h>
9 #include <linux/file.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/time.h>
13 #include <linux/pm_qos.h>
15 #include <linux/dma-mapping.h>
16 #include <sound/core.h>
17 #include <sound/control.h>
18 #include <sound/info.h>
19 #include <sound/pcm.h>
20 #include <sound/pcm_params.h>
21 #include <sound/timer.h>
22 #include <sound/minors.h>
23 #include <linux/uio.h>
24 #include <linux/delay.h>
26 #include "pcm_local.h"
/*
 * hw_params tracing hooks: with CONFIG_SND_DEBUG the tracepoints from
 * pcm_param_trace.h are instantiated; otherwise the stubs below make
 * the hooks compile away so callers need no #ifdef of their own.
 */
28 #ifdef CONFIG_SND_DEBUG
29 #define CREATE_TRACE_POINTS
30 #include "pcm_param_trace.h"
/* no-op stubs: the *_enabled() forms evaluate to 0, the trace macros expand empty */
32 #define trace_hw_mask_param_enabled() 0
33 #define trace_hw_interval_param_enabled() 0
34 #define trace_hw_mask_param(substream, type, index, prev, curr)
35 #define trace_hw_interval_param(substream, type, index, prev, curr)
/*
 * hw_params layout used by the old ioctl ABI (SNDRV_PCM_IOCTL_*_OLD
 * below); it carries fewer mask/interval slots than the current
 * struct snd_pcm_hw_params, so conversion helpers are needed.
 */
42 struct snd_pcm_hw_params_old {
44 unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
45 SNDRV_PCM_HW_PARAM_ACCESS + 1];
46 struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
47 SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
52 unsigned int rate_num;
53 unsigned int rate_den;
54 snd_pcm_uframes_t fifo_size;	/* FIFO size in frames */
55 unsigned char reserved[64];	/* reserved space for the fixed-size ABI */
/* Old-ABI ioctl numbers and the handlers converting to/from the old layout. */
58 #ifdef CONFIG_SND_SUPPORT_OLD_API
59 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
60 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
62 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
63 struct snd_pcm_hw_params_old __user * _oparams);
64 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
65 struct snd_pcm_hw_params_old __user * _oparams);
/* forward declaration of the shared open helper */
67 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
/* serializes linking/unlinking of substream groups file-wide */
73 static DECLARE_RWSEM(snd_pcm_link_rwsem);
/* Initialize a PCM group object: locks, empty substream list, refcount 1. */
75 void snd_pcm_group_init(struct snd_pcm_group *group)
77 spin_lock_init(&group->lock);
78 mutex_init(&group->mutex);
79 INIT_LIST_HEAD(&group->substreams);
80 refcount_set(&group->refs, 1);
83 /* define group lock helpers */
/*
 * DEFINE_PCM_GROUP_LOCK generates snd_pcm_group_{lock,unlock}() and the
 * _irq variants. Each generated helper takes the group mutex when the
 * stream is nonatomic, and the group spinlock otherwise.
 */
84 #define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
85 static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
88 mutex_ ## mutex_action(&group->mutex); \
90 spin_ ## action(&group->lock); \
93 DEFINE_PCM_GROUP_LOCK(lock, lock);
94 DEFINE_PCM_GROUP_LOCK(unlock, unlock);
95 DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
96 DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
99 * snd_pcm_stream_lock - Lock the PCM stream
100 * @substream: PCM substream
102 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
103 * flag of the given substream. This also takes the global link rw lock
104 * (or rw sem), too, for avoiding the race with linked streams.
/* NOTE(review): the body shown only takes the self-group lock; confirm
 * whether the claim above about the global link rwlock still applies. */
106 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
108 snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
110 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
113 * snd_pcm_stream_unlock - Unlock the PCM stream
114 * @substream: PCM substream
116 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
118 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
120 snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
122 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
125 * snd_pcm_stream_lock_irq - Lock the PCM stream
126 * @substream: PCM substream
128 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
129 * IRQ (only when nonatomic is false). In nonatomic case, this is identical
130 * as snd_pcm_stream_lock().
/* the nonatomic (mutex) path never disables IRQs */
132 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
134 snd_pcm_group_lock_irq(&substream->self_group,
135 substream->pcm->nonatomic);
137 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
/*
 * Lock the stream with the SINGLE_DEPTH_NESTING lockdep class: used
 * for taking a second stream lock while another stream lock is
 * already held, without triggering a false lockdep report.
 */
139 static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
141 struct snd_pcm_group *group = &substream->self_group;
143 if (substream->pcm->nonatomic)
144 mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
146 spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
150 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
151 * @substream: PCM substream
153 * This is a counter-part of snd_pcm_stream_lock_irq().
155 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
157 snd_pcm_group_unlock_irq(&substream->self_group,
158 substream->pcm->nonatomic);
160 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
/*
 * Lock the stream and hand back the saved IRQ flags for a later
 * snd_pcm_stream_unlock_irqrestore(). flags is initialized to 0 so the
 * nonatomic (mutex) path returns a benign value.
 */
162 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
164 unsigned long flags = 0;
165 if (substream->pcm->nonatomic)
166 mutex_lock(&substream->self_group.mutex);
168 spin_lock_irqsave(&substream->self_group.lock, flags);
171 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
174 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
175 * @substream: PCM substream
178 * This is a counter-part of snd_pcm_stream_lock_irqsave().
/* @flags must be the value returned by _snd_pcm_stream_lock_irqsave() */
180 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
183 if (substream->pcm->nonatomic)
184 mutex_unlock(&substream->self_group.mutex);
186 spin_unlock_irqrestore(&substream->self_group.lock, flags);
188 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
/*
 * Fill @info with the static description of @substream: card/device
 * numbers, id/name strings and subdevice counts. All string copies are
 * bounded (strlcpy).
 * NOTE(review): newer kernels replace strlcpy() with strscpy(); worth
 * doing when this file is next touched.
 */
190 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
192 struct snd_pcm *pcm = substream->pcm;
193 struct snd_pcm_str *pstr = substream->pstr;
195 memset(info, 0, sizeof(*info));
196 info->card = pcm->card->number;
197 info->device = pcm->device;
198 info->stream = substream->stream;
199 info->subdevice = substream->number;
200 strlcpy(info->id, pcm->id, sizeof(info->id));
201 strlcpy(info->name, pcm->name, sizeof(info->name));
202 info->dev_class = pcm->dev_class;
203 info->dev_subclass = pcm->dev_subclass;
204 info->subdevices_count = pstr->substream_count;
205 info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
206 strlcpy(info->subname, substream->name, sizeof(info->subname));
/* Heap-allocate a snd_pcm_info, fill it via snd_pcm_info() and copy the
 * result back to user space. */
211 int snd_pcm_info_user(struct snd_pcm_substream *substream,
212 struct snd_pcm_info __user * _info)
214 struct snd_pcm_info *info;
217 info = kmalloc(sizeof(*info), GFP_KERNEL);
220 err = snd_pcm_info(substream, info);
222 if (copy_to_user(_info, info, sizeof(*info)))
/*
 * Whether mmap can be offered: the hardware must advertise
 * SNDRV_PCM_INFO_MMAP; drivers providing their own .mmap (or using a
 * non-DEV/DEV_UC buffer type) are handled before the final fallback,
 * which asks the DMA layer via dma_can_mmap().
 */
229 static bool hw_support_mmap(struct snd_pcm_substream *substream)
231 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
234 if (substream->ops->mmap ||
235 (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
236 substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
239 return dma_can_mmap(substream->dma_buffer.dev.dev);
/*
 * Refine every mask parameter in the FIRST_MASK..LAST_MASK range
 * against the runtime hw_constraints; bits set in params->cmask report
 * back to the caller which masks were narrowed.
 */
242 static int constrain_mask_params(struct snd_pcm_substream *substream,
243 struct snd_pcm_hw_params *params)
245 struct snd_pcm_hw_constraints *constrs =
246 &substream->runtime->hw_constraints;
249 struct snd_mask old_mask;
252 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
253 m = hw_param_mask(params, k);
254 if (snd_mask_empty(m))
257 /* This parameter is not requested to change by a caller. */
258 if (!(params->rmask & (1 << k)))
261 if (trace_hw_mask_param_enabled())
264 changed = snd_mask_refine(m, constrs_mask(constrs, k));
270 /* Set corresponding flag so that the caller gets it. */
271 trace_hw_mask_param(substream, k, 0, &old_mask, m);
272 params->cmask |= 1 << k;
/*
 * Interval counterpart of constrain_mask_params(): refine each interval
 * parameter against the runtime hw_constraints and flag changes in
 * params->cmask.
 */
278 static int constrain_interval_params(struct snd_pcm_substream *substream,
279 struct snd_pcm_hw_params *params)
281 struct snd_pcm_hw_constraints *constrs =
282 &substream->runtime->hw_constraints;
283 struct snd_interval *i;
285 struct snd_interval old_interval;
288 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
289 i = hw_param_interval(params, k);
290 if (snd_interval_empty(i))
293 /* This parameter is not requested to change by a caller. */
294 if (!(params->rmask & (1 << k)))
297 if (trace_hw_interval_param_enabled())
300 changed = snd_interval_refine(i, constrs_interval(constrs, k));
306 /* Set corresponding flag so that the caller gets it. */
307 trace_hw_interval_param(substream, k, 0, &old_interval, i);
308 params->cmask |= 1 << k;
/*
 * Apply the driver's constraint rules iteratively until the parameter
 * space stabilizes. The rstamps/vstamps sequence numbers let each rule
 * run only when one of its dependency parameters changed after the
 * rule's previous application, avoiding redundant re-evaluation.
 */
314 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
315 struct snd_pcm_hw_params *params)
317 struct snd_pcm_hw_constraints *constrs =
318 &substream->runtime->hw_constraints;
320 unsigned int *rstamps;
321 unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
323 struct snd_pcm_hw_rule *r;
325 struct snd_mask old_mask;
326 struct snd_interval old_interval;
328 int changed, err = 0;
331 * Each application of rule has own sequence number.
333 * Each member of 'rstamps' array represents the sequence number of
334 * recent application of corresponding rule.
336 rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
341 * Each member of 'vstamps' array represents the sequence number of
342 * recent application of rule in which corresponding parameters were
345 * In initial state, elements corresponding to parameters requested by
346 * a caller is 1. For unrequested parameters, corresponding members
347 * have 0 so that the parameters are never changed anymore.
349 for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
350 vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
352 /* Due to the above design, actual sequence number starts at 2. */
355 /* Apply all rules in order. */
357 for (k = 0; k < constrs->rules_num; k++) {
358 r = &constrs->rules[k];
361 * Check condition bits of this rule. When the rule has
362 * some condition bits, parameter without the bits is
363 * never processed. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
364 * is an example of the condition bits.
366 if (r->cond && !(r->cond & params->flags))
370 * The 'deps' array includes maximum three dependencies
371 * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
372 * member of this array is a sentinel and should be
375 * This rule should be processed in this time when dependent
376 * parameters were changed at former applications of the other
379 for (d = 0; r->deps[d] >= 0; d++) {
380 if (vstamps[r->deps[d]] > rstamps[k])
386 if (trace_hw_mask_param_enabled()) {
387 if (hw_is_mask(r->var))
388 old_mask = *hw_param_mask(params, r->var);
390 if (trace_hw_interval_param_enabled()) {
391 if (hw_is_interval(r->var))
392 old_interval = *hw_param_interval(params, r->var);
395 changed = r->func(params, r);
402 * When the parameter is changed, notify it to the caller
403 * by corresponding returned bit, then preparing for next
406 if (changed && r->var >= 0) {
407 if (hw_is_mask(r->var)) {
408 trace_hw_mask_param(substream, r->var,
410 hw_param_mask(params, r->var));
412 if (hw_is_interval(r->var)) {
413 trace_hw_interval_param(substream, r->var,
414 k + 1, &old_interval,
415 hw_param_interval(params, r->var));
418 params->cmask |= (1 << r->var);
419 vstamps[r->var] = stamp;
423 rstamps[k] = stamp++;
426 /* Iterate to evaluate all rules till no parameters are changed. */
/*
 * Derive the scalar fields (msbits, rate_num/rate_den, fifo_size) and
 * the info flags that are not governed by the constraint rules, once
 * the relevant parameters have been narrowed to single values.
 */
435 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
436 struct snd_pcm_hw_params *params)
438 const struct snd_interval *i;
439 const struct snd_mask *m;
442 if (!params->msbits) {
443 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
444 if (snd_interval_single(i))
445 params->msbits = snd_interval_value(i);
448 if (!params->rate_den) {
449 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
450 if (snd_interval_single(i)) {
451 params->rate_num = snd_interval_value(i);
452 params->rate_den = 1;
/* fifo_size needs the driver's help: queried via the IOCTL1 op */
456 if (!params->fifo_size) {
457 m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
458 i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
459 if (snd_mask_single(m) && snd_interval_single(i)) {
460 err = substream->ops->ioctl(substream,
461 SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
/* mask out mmap capability bits when mmap is not actually possible */
468 params->info = substream->runtime->hw.info;
469 params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
470 SNDRV_PCM_INFO_DRAIN_TRIGGER);
471 if (!hw_support_mmap(substream))
472 params->info &= ~(SNDRV_PCM_INFO_MMAP |
473 SNDRV_PCM_INFO_MMAP_VALID);
/*
 * snd_pcm_hw_refine - narrow the given hw_params against all constraints.
 * Resets the derived scalar fields for re-computation, then applies the
 * mask constraints, the interval constraints and the rule set in order.
 */
479 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
480 struct snd_pcm_hw_params *params)
485 params->fifo_size = 0;
486 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
488 if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
489 params->rate_num = 0;
490 params->rate_den = 0;
493 err = constrain_mask_params(substream, params);
497 err = constrain_interval_params(substream, params);
501 err = constrain_params_by_rules(substream, params);
509 EXPORT_SYMBOL(snd_pcm_hw_refine);
/* ioctl glue: copy hw_params from user space, refine + fix up the
 * unreferenced fields, and copy the result back. */
511 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
512 struct snd_pcm_hw_params __user * _params)
514 struct snd_pcm_hw_params *params;
517 params = memdup_user(_params, sizeof(*params));
519 return PTR_ERR(params);
521 err = snd_pcm_hw_refine(substream, params);
525 err = fixup_unreferenced_params(substream, params);
529 if (copy_to_user(_params, params, sizeof(*params)))
/* Convert 75% of the period time to microseconds; used as the CPU
 * latency deadline for the PM QoS request in snd_pcm_hw_params().
 * A negative return means the value cannot be computed. */
536 static int period_to_usecs(struct snd_pcm_runtime *runtime)
541 return -1; /* invalid */
543 /* take 75% of period time as the deadline */
544 usecs = (750000 / runtime->rate) * runtime->period_size;
545 usecs += ((750000 % runtime->rate) * runtime->period_size) /
/* Switch the stream state under the stream lock; a DISCONNECTED stream
 * keeps its state so a dead device never appears alive again. */
551 static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
553 snd_pcm_stream_lock_irq(substream);
554 if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
555 substream->runtime->status->state = state;
556 snd_pcm_stream_unlock_irq(substream);
/* Forward a timer event (with the trigger timestamp) to an attached
 * PCM timer; compiles away without CONFIG_SND_PCM_TIMER. */
559 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
562 #ifdef CONFIG_SND_PCM_TIMER
563 if (substream->timer)
564 snd_timer_notify(substream->timer, event,
565 &substream->runtime->trigger_tstamp);
570 * snd_pcm_hw_params_choose - choose a configuration defined by @params
572 * @params: the hw_params instance
574 * Choose one configuration from configuration space defined by @params.
575 * The configuration chosen is that obtained fixing in this order:
576 * first access, first format, first subformat, min channels,
577 * min rate, min period time, max buffer size, min tick time
579 * Return: Zero if successful, or a negative error code on failure.
581 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
582 struct snd_pcm_hw_params *params)
584 static const int vars[] = {
585 SNDRV_PCM_HW_PARAM_ACCESS,
586 SNDRV_PCM_HW_PARAM_FORMAT,
587 SNDRV_PCM_HW_PARAM_SUBFORMAT,
588 SNDRV_PCM_HW_PARAM_CHANNELS,
589 SNDRV_PCM_HW_PARAM_RATE,
590 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
591 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
592 SNDRV_PCM_HW_PARAM_TICK_TIME,
596 struct snd_mask old_mask;
597 struct snd_interval old_interval;
600 for (v = vars; *v != -1; v++) {
601 /* Keep old parameter to trace. */
602 if (trace_hw_mask_param_enabled()) {
604 old_mask = *hw_param_mask(params, *v);
606 if (trace_hw_interval_param_enabled()) {
607 if (hw_is_interval(*v))
608 old_interval = *hw_param_interval(params, *v);
/* buffer size is maximized; every other parameter takes its minimum */
610 if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
611 changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
613 changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
619 /* Trace the changed parameter. */
620 if (hw_is_mask(*v)) {
621 trace_hw_mask_param(pcm, *v, 0, &old_mask,
622 hw_param_mask(params, *v));
624 if (hw_is_interval(*v)) {
625 trace_hw_interval_param(pcm, *v, 0, &old_interval,
626 hw_param_interval(params, *v));
633 /* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise
634 * block the further r/w operations
/* NOTE(review): pairs with snd_pcm_buffer_access_unlock(), which does
 * atomic_inc() + mutex_unlock(); confirm buffer_accessing polarity. */
636 static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
638 if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
640 mutex_lock(&runtime->buffer_mutex);
641 return 0; /* keep buffer_mutex, unlocked by below */
644 /* release buffer_mutex and clear r/w access flag */
/* counterpart of snd_pcm_buffer_access_lock() */
645 static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
647 mutex_unlock(&runtime->buffer_mutex);
648 atomic_inc(&runtime->buffer_accessing);
/* is_oss_stream(): true when the substream was opened through the OSS
 * emulation layer; constant false without CONFIG_SND_PCM_OSS. */
651 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
652 #define is_oss_stream(substream) ((substream)->oss.oss)
654 #define is_oss_stream(substream) false
/*
 * SNDRV_PCM_IOCTL_HW_PARAMS handler: validates the current state (no
 * active mmaps unless OSS), refines and chooses one configuration,
 * calls the driver's hw_params op, then derives the runtime fields,
 * the default sw_params and the PM QoS latency request. On a driver
 * error the state is forced back to OPEN so the application can retry
 * with different parameters.
 */
657 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
658 struct snd_pcm_hw_params *params)
660 struct snd_pcm_runtime *runtime;
663 snd_pcm_uframes_t frames;
665 if (PCM_RUNTIME_CHECK(substream))
667 runtime = substream->runtime;
668 err = snd_pcm_buffer_access_lock(runtime);
671 snd_pcm_stream_lock_irq(substream);
672 switch (runtime->status->state) {
673 case SNDRV_PCM_STATE_OPEN:
674 case SNDRV_PCM_STATE_SETUP:
675 case SNDRV_PCM_STATE_PREPARED:
676 if (!is_oss_stream(substream) &&
677 atomic_read(&substream->mmap_count))
684 snd_pcm_stream_unlock_irq(substream);
689 err = snd_pcm_hw_refine(substream, params);
693 err = snd_pcm_hw_params_choose(substream, params);
697 err = fixup_unreferenced_params(substream, params);
701 if (substream->ops->hw_params != NULL) {
702 err = substream->ops->hw_params(substream, params);
/* cache the chosen configuration in the runtime */
707 runtime->access = params_access(params);
708 runtime->format = params_format(params);
709 runtime->subformat = params_subformat(params);
710 runtime->channels = params_channels(params);
711 runtime->rate = params_rate(params);
712 runtime->period_size = params_period_size(params);
713 runtime->periods = params_periods(params);
714 runtime->buffer_size = params_buffer_size(params);
715 runtime->info = params->info;
716 runtime->rate_num = params->rate_num;
717 runtime->rate_den = params->rate_den;
718 runtime->no_period_wakeup =
719 (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
720 (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
722 bits = snd_pcm_format_physical_width(runtime->format);
723 runtime->sample_bits = bits;
724 bits *= runtime->channels;
725 runtime->frame_bits = bits;
/* compute byte/frame alignment for frames whose bit width is not a
 * multiple of 8 */
727 while (bits % 8 != 0) {
731 runtime->byte_align = bits / 8;
732 runtime->min_align = frames;
734 /* Default sw params */
735 runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
736 runtime->period_step = 1;
737 runtime->control->avail_min = runtime->period_size;
738 runtime->start_threshold = 1;
739 runtime->stop_threshold = runtime->buffer_size;
740 runtime->silence_threshold = 0;
741 runtime->silence_size = 0;
/* boundary: largest buffer_size multiple that still fits in a long */
742 runtime->boundary = runtime->buffer_size;
743 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
744 runtime->boundary *= 2;
746 /* clear the buffer for avoiding possible kernel info leaks */
747 if (runtime->dma_area && !substream->ops->copy_user) {
748 size_t size = runtime->dma_bytes;
750 if (runtime->info & SNDRV_PCM_INFO_MMAP)
751 size = PAGE_ALIGN(size);
752 memset(runtime->dma_area, 0, size);
755 snd_pcm_timer_resolution_change(substream);
756 snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
/* replace any stale latency request with one for the new period */
758 if (pm_qos_request_active(&substream->latency_pm_qos_req))
759 pm_qos_remove_request(&substream->latency_pm_qos_req);
760 if ((usecs = period_to_usecs(runtime)) >= 0)
761 pm_qos_add_request(&substream->latency_pm_qos_req,
762 PM_QOS_CPU_DMA_LATENCY, usecs);
766 /* hardware might be unusable from this time,
767 * so we force application to retry to set
768 * the correct hardware parameter settings
770 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
771 if (substream->ops->hw_free != NULL)
772 substream->ops->hw_free(substream);
775 snd_pcm_buffer_access_unlock(runtime);
/* ioctl glue: copy hw_params from user space, apply them and copy the
 * chosen configuration back. */
779 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
780 struct snd_pcm_hw_params __user * _params)
782 struct snd_pcm_hw_params *params;
785 params = memdup_user(_params, sizeof(*params));
787 return PTR_ERR(params);
789 err = snd_pcm_hw_params(substream, params);
793 if (copy_to_user(_params, params, sizeof(*params)))
/*
 * SNDRV_PCM_IOCTL_HW_FREE handler: allowed from SETUP/PREPARED with no
 * active mmaps (other states bail out in the switch); releases driver
 * resources, drops the PM QoS request and returns the stream to OPEN.
 */
800 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
802 struct snd_pcm_runtime *runtime;
805 if (PCM_RUNTIME_CHECK(substream))
807 runtime = substream->runtime;
808 result = snd_pcm_buffer_access_lock(runtime);
811 snd_pcm_stream_lock_irq(substream);
812 switch (runtime->status->state) {
813 case SNDRV_PCM_STATE_SETUP:
814 case SNDRV_PCM_STATE_PREPARED:
815 if (atomic_read(&substream->mmap_count))
822 snd_pcm_stream_unlock_irq(substream);
825 if (substream->ops->hw_free)
826 result = substream->ops->hw_free(substream);
827 snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
828 pm_qos_remove_request(&substream->latency_pm_qos_req);
830 snd_pcm_buffer_access_unlock(runtime);
/*
 * SNDRV_PCM_IOCTL_SW_PARAMS handler: validate the software parameters
 * (timestamp mode/type, avail_min, silence settings) outside the lock,
 * then install them under the stream lock. For a running playback
 * stream the silence fill and stream state are refreshed immediately;
 * the effective boundary is reported back through @params.
 */
834 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
835 struct snd_pcm_sw_params *params)
837 struct snd_pcm_runtime *runtime;
840 if (PCM_RUNTIME_CHECK(substream))
842 runtime = substream->runtime;
843 snd_pcm_stream_lock_irq(substream);
844 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
845 snd_pcm_stream_unlock_irq(substream);
848 snd_pcm_stream_unlock_irq(substream);
850 if (params->tstamp_mode < 0 ||
851 params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
853 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
854 params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
856 if (params->avail_min == 0)
/* silence_size >= boundary means "silence the whole buffer always" and
 * is only valid with a zero threshold */
858 if (params->silence_size >= runtime->boundary) {
859 if (params->silence_threshold != 0)
862 if (params->silence_size > params->silence_threshold)
864 if (params->silence_threshold > runtime->buffer_size)
868 snd_pcm_stream_lock_irq(substream);
869 runtime->tstamp_mode = params->tstamp_mode;
870 if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
871 runtime->tstamp_type = params->tstamp_type;
872 runtime->period_step = params->period_step;
873 runtime->control->avail_min = params->avail_min;
874 runtime->start_threshold = params->start_threshold;
875 runtime->stop_threshold = params->stop_threshold;
876 runtime->silence_threshold = params->silence_threshold;
877 runtime->silence_size = params->silence_size;
878 params->boundary = runtime->boundary;
879 if (snd_pcm_running(substream)) {
880 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
881 runtime->silence_size > 0)
882 snd_pcm_playback_silence(substream, ULONG_MAX);
883 err = snd_pcm_update_state(substream, runtime);
885 snd_pcm_stream_unlock_irq(substream);
889 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
890 struct snd_pcm_sw_params __user * _params)
892 struct snd_pcm_sw_params params;
894 if (copy_from_user(¶ms, _params, sizeof(params)))
896 err = snd_pcm_sw_params(substream, ¶ms);
897 if (copy_to_user(_params, ¶ms, sizeof(params)))
/*
 * Current delay in frames: queued-but-unplayed data for playback,
 * captured-but-unread data for capture, plus the driver-reported
 * extra delay (runtime->delay).
 */
902 static inline snd_pcm_uframes_t
903 snd_pcm_calc_delay(struct snd_pcm_substream *substream)
905 snd_pcm_uframes_t delay;
907 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
908 delay = snd_pcm_playback_hw_avail(substream->runtime);
910 delay = snd_pcm_capture_avail(substream->runtime);
911 return delay + substream->runtime->delay;
/*
 * Fill @status under the stream lock: resolve the requested audio
 * timestamp type (with COMPAT fallback), refresh the hw pointer for a
 * running stream, and snapshot the pointer/avail/delay statistics.
 * avail_max and overrange are reset once they have been reported.
 */
914 int snd_pcm_status(struct snd_pcm_substream *substream,
915 struct snd_pcm_status *status)
917 struct snd_pcm_runtime *runtime = substream->runtime;
919 snd_pcm_stream_lock_irq(substream);
921 snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
922 &runtime->audio_tstamp_config);
924 /* backwards compatible behavior */
925 if (runtime->audio_tstamp_config.type_requested ==
926 SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
927 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
928 runtime->audio_tstamp_config.type_requested =
929 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
931 runtime->audio_tstamp_config.type_requested =
932 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
933 runtime->audio_tstamp_report.valid = 0;
935 runtime->audio_tstamp_report.valid = 1;
937 status->state = runtime->status->state;
938 status->suspended_state = runtime->status->suspended_state;
939 if (status->state == SNDRV_PCM_STATE_OPEN)
941 status->trigger_tstamp = runtime->trigger_tstamp;
942 if (snd_pcm_running(substream)) {
943 snd_pcm_update_hw_ptr(substream);
944 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
945 status->tstamp = runtime->status->tstamp;
946 status->driver_tstamp = runtime->driver_tstamp;
947 status->audio_tstamp =
948 runtime->status->audio_tstamp;
949 if (runtime->audio_tstamp_report.valid == 1)
950 /* backwards compatibility, no report provided in COMPAT mode */
951 snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
952 &status->audio_tstamp_accuracy,
953 &runtime->audio_tstamp_report);
958 /* get tstamp only in fallback mode and only if enabled */
959 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
960 snd_pcm_gettime(runtime, &status->tstamp);
963 status->appl_ptr = runtime->control->appl_ptr;
964 status->hw_ptr = runtime->status->hw_ptr;
965 status->avail = snd_pcm_avail(substream);
966 status->delay = snd_pcm_running(substream) ?
967 snd_pcm_calc_delay(substream) : 0;
968 status->avail_max = runtime->avail_max;
969 status->overrange = runtime->overrange;
/* these statistics are reported once, then reset */
970 runtime->avail_max = 0;
971 runtime->overrange = 0;
973 snd_pcm_stream_unlock_irq(substream);
/* Copy-in/out wrapper for snd_pcm_status(); with @ext set the extended
 * ioctl first reads audio_tstamp_data from user space, the rest of the
 * user structure is ignored on input. */
977 static int snd_pcm_status_user(struct snd_pcm_substream *substream,
978 struct snd_pcm_status __user * _status,
981 struct snd_pcm_status status;
984 memset(&status, 0, sizeof(status));
986 * with extension, parameters are read/write,
987 * get audio_tstamp_data from user,
988 * ignore rest of status structure
990 if (ext && get_user(status.audio_tstamp_data,
991 (u32 __user *)(&_status->audio_tstamp_data)))
993 res = snd_pcm_status(substream, &status);
996 if (copy_to_user(_status, &status, sizeof(status)))
/*
 * SNDRV_PCM_IOCTL_CHANNEL_INFO handler: bails out in OPEN state or for
 * an out-of-range channel, then lets the driver fill the channel info
 * via the IOCTL1 op.
 */
1001 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
1002 struct snd_pcm_channel_info * info)
1004 struct snd_pcm_runtime *runtime;
1005 unsigned int channel;
1007 channel = info->channel;
1008 runtime = substream->runtime;
1009 snd_pcm_stream_lock_irq(substream);
1010 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
1011 snd_pcm_stream_unlock_irq(substream);
1014 snd_pcm_stream_unlock_irq(substream);
1015 if (channel >= runtime->channels)
1017 memset(info, 0, sizeof(*info));
1018 info->channel = channel;
1019 return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
/* Copy-in/out wrapper for snd_pcm_channel_info(). */
1022 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
1023 struct snd_pcm_channel_info __user * _info)
1025 struct snd_pcm_channel_info info;
1028 if (copy_from_user(&info, _info, sizeof(info)))
1030 res = snd_pcm_channel_info(substream, &info);
1033 if (copy_to_user(_info, &info, sizeof(info)))
/*
 * Latch the trigger timestamp for a stream. The group's trigger_master
 * records the actual timestamp; other group members recurse to the
 * master and copy its stamp. trigger_master is cleared once consumed.
 */
1038 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
1040 struct snd_pcm_runtime *runtime = substream->runtime;
1041 if (runtime->trigger_master == NULL)
1043 if (runtime->trigger_master == substream) {
1044 if (!runtime->trigger_tstamp_latched)
1045 snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1047 snd_pcm_trigger_tstamp(runtime->trigger_master);
1048 runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1050 runtime->trigger_master = NULL;
/* Callback table for the pre/do/undo/post state-change sequence driven
 * by the snd_pcm_action*() helpers; undo_action reverts members already
 * triggered when a later group member fails. */
1054 int (*pre_action)(struct snd_pcm_substream *substream, int state);
1055 int (*do_action)(struct snd_pcm_substream *substream, int state);
1056 void (*undo_action)(struct snd_pcm_substream *substream, int state);
1057 void (*post_action)(struct snd_pcm_substream *substream, int state);
1061 * this functions is core for handling of linked stream
1062 * Note: the stream state might be changed also on failure
1063 * Note2: call with calling stream lock + link lock
/*
 * Locking: the caller already holds @substream's lock; every other
 * group member is locked here (buffer mutex, group mutex, or spinlock
 * depending on @stream_lock and nonatomic) with a lockdep nesting
 * depth, and unlocked again at the end.
 */
1065 static int snd_pcm_action_group(const struct action_ops *ops,
1066 struct snd_pcm_substream *substream,
1067 int state, int stream_lock)
1069 struct snd_pcm_substream *s = NULL;
1070 struct snd_pcm_substream *s1;
1071 int res = 0, depth = 1;
1073 snd_pcm_group_for_each_entry(s, substream) {
1074 if (s != substream) {
1076 mutex_lock_nested(&s->runtime->buffer_mutex, depth);
1077 else if (s->pcm->nonatomic)
1078 mutex_lock_nested(&s->self_group.mutex, depth);
1080 spin_lock_nested(&s->self_group.lock, depth);
1083 res = ops->pre_action(s, state);
1087 snd_pcm_group_for_each_entry(s, substream) {
1088 res = ops->do_action(s, state);
1090 if (ops->undo_action) {
1091 snd_pcm_group_for_each_entry(s1, substream) {
1092 if (s1 == s) /* failed stream */
1094 ops->undo_action(s1, state);
1097 s = NULL; /* unlock all */
1101 snd_pcm_group_for_each_entry(s, substream) {
1102 ops->post_action(s, state);
1105 /* unlock streams */
1106 snd_pcm_group_for_each_entry(s1, substream) {
1107 if (s1 != substream) {
1109 mutex_unlock(&s1->runtime->buffer_mutex);
1110 else if (s1->pcm->nonatomic)
1111 mutex_unlock(&s1->self_group.mutex);
1113 spin_unlock(&s1->self_group.lock);
1115 if (s1 == s) /* end */
1122 * Note: call with stream lock
/* Run one ops sequence on a single (unlinked) substream; undo_action
 * reverts pre_action's effects when do_action fails. */
1124 static int snd_pcm_action_single(const struct action_ops *ops,
1125 struct snd_pcm_substream *substream,
1130 res = ops->pre_action(substream, state);
1133 res = ops->do_action(substream, state);
1135 ops->post_action(substream, state);
1136 else if (ops->undo_action)
1137 ops->undo_action(substream, state);
/* Move @substream into @new_group: update the back-pointer and re-link
 * the substream onto the new group's list. */
1141 static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
1142 struct snd_pcm_group *new_group)
1144 substream->group = new_group;
1145 list_move(&substream->link_list, &new_group->substreams);
1149 * Unref and unlock the group, but keep the stream lock;
1150 * when the group becomes empty and no longer referred, destroy itself
/* the refcount decrement decides whether this caller frees the group */
1159 do_free = refcount_dec_and_test(&group->refs);
1160 snd_pcm_group_unlock(group, substream->pcm->nonatomic);
1166 * Lock the group inside a stream lock and reference it;
1167 * return the locked group object, or NULL if not linked
1169 static struct snd_pcm_group *
1170 snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
1172 bool nonatomic = substream->pcm->nonatomic;
1173 struct snd_pcm_group *group;
1177 if (!snd_pcm_stream_linked(substream))
1179 group = substream->group;
1180 /* block freeing the group object */
1181 refcount_inc(&group->refs);
/* fast path: a successful trylock avoids dropping the stream lock */
1183 trylock = nonatomic ? mutex_trylock(&group->mutex) :
1184 spin_trylock(&group->lock);
1188 /* re-lock for avoiding ABBA deadlock */
1189 snd_pcm_stream_unlock(substream);
1190 snd_pcm_group_lock(group, nonatomic);
1191 snd_pcm_stream_lock(substream);
1193 /* check the group again; the above opens a small race window */
1194 if (substream->group == group)
1196 /* group changed, try again */
1197 snd_pcm_group_unref(group, substream);
1203 * Note: call with stream lock
/* Dispatch an ops sequence to the whole group when the stream is
 * linked, otherwise to the single substream; the group reference taken
 * here is dropped again afterwards. */
1205 static int snd_pcm_action(const struct action_ops *ops,
1206 struct snd_pcm_substream *substream,
1209 struct snd_pcm_group *group;
1212 group = snd_pcm_stream_group_ref(substream);
1214 res = snd_pcm_action_group(ops, substream, state, 1);
1216 res = snd_pcm_action_single(ops, substream, state);
1217 snd_pcm_group_unref(group, substream);
1222 * Note: don't use any locks before
/* Convenience wrapper: take the stream lock, run the action, unlock. */
1224 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1225 struct snd_pcm_substream *substream,
1230 snd_pcm_stream_lock_irq(substream);
1231 res = snd_pcm_action(ops, substream, state);
1232 snd_pcm_stream_unlock_irq(substream);
/*
 * Variant for nonatomic (sleepable) actions: runs without the stream
 * lock, guarded instead by the link rwsem (group membership stays
 * stable) and the buffer-access lock (no concurrent read/write).
 */
1238 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1239 struct snd_pcm_substream *substream,
1244 /* Guarantee the group members won't change during non-atomic action */
1245 down_read(&snd_pcm_link_rwsem);
1246 res = snd_pcm_buffer_access_lock(substream->runtime);
1249 if (snd_pcm_stream_linked(substream))
1250 res = snd_pcm_action_group(ops, substream, state, 0);
1252 res = snd_pcm_action_single(ops, substream, state);
1253 snd_pcm_buffer_access_unlock(substream->runtime);
1255 up_read(&snd_pcm_link_rwsem);
/* START is only valid from PREPARED; playback additionally requires
 * queued data. On success this substream becomes the trigger master. */
1262 static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
1264 struct snd_pcm_runtime *runtime = substream->runtime;
1265 if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1267 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1268 !snd_pcm_playback_data(substream))
1270 runtime->trigger_tstamp_latched = false;
1271 runtime->trigger_master = substream;
/* Only the trigger master forwards the START trigger to the driver. */
1275 static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
1277 if (substream->runtime->trigger_master != substream)
1279 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
/* Revert a failed group start: send STOP, but only via the master. */
1282 static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
1284 if (substream->runtime->trigger_master == substream)
1285 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1288 static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
1290 struct snd_pcm_runtime *runtime = substream->runtime;
1291 snd_pcm_trigger_tstamp(substream);
1292 runtime->hw_ptr_jiffies = jiffies;
1293 runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1295 runtime->status->state = state;
1296 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1297 runtime->silence_size > 0)
1298 snd_pcm_playback_silence(substream, ULONG_MAX);
1299 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1302 static const struct action_ops snd_pcm_action_start = {
1303 .pre_action = snd_pcm_pre_start,
1304 .do_action = snd_pcm_do_start,
1305 .undo_action = snd_pcm_undo_start,
1306 .post_action = snd_pcm_post_start
1310 * snd_pcm_start - start all linked streams
1311 * @substream: the PCM substream instance
1313 * Return: Zero if successful, or a negative error code.
1314 * The stream lock must be acquired before calling this function.
1316 int snd_pcm_start(struct snd_pcm_substream *substream)
1318 return snd_pcm_action(&snd_pcm_action_start, substream,
1319 SNDRV_PCM_STATE_RUNNING);
1322 /* take the stream lock and start the streams */
1323 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1325 return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1326 SNDRV_PCM_STATE_RUNNING);
/*
 * Stop-action callbacks: stop can be applied in any state except OPEN;
 * the hardware STOP trigger is issued only if actually running.
 */
1332 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
1334 struct snd_pcm_runtime *runtime = substream->runtime;
1335 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1337 runtime->trigger_master = substream;
1341 static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
1343 if (substream->runtime->trigger_master == substream &&
1344 snd_pcm_running(substream))
1345 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1346 return 0; /* unconditionally stop all substreams */
/*
 * Post-stop: switch to the requested state (if not already there),
 * notify the PCM timer and wake any sleepers on the stream.
 */
1349 static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
1351 struct snd_pcm_runtime *runtime = substream->runtime;
1352 if (runtime->status->state != state) {
1353 snd_pcm_trigger_tstamp(substream);
1354 runtime->status->state = state;
1355 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1357 wake_up(&runtime->sleep);
1358 wake_up(&runtime->tsleep);
1361 static const struct action_ops snd_pcm_action_stop = {
1362 .pre_action = snd_pcm_pre_stop,
1363 .do_action = snd_pcm_do_stop,
1364 .post_action = snd_pcm_post_stop
1368 * snd_pcm_stop - try to stop all running streams in the substream group
1369 * @substream: the PCM substream instance
1370 * @state: PCM state after stopping the stream
1372 * The state of each stream is then changed to the given state unconditionally.
1374 * Return: Zero if successful, or a negative error code.
1376 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1378 return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1380 EXPORT_SYMBOL(snd_pcm_stop);
1383 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1384 * @substream: the PCM substream
1386 * After stopping, the state is changed to SETUP.
1387 * Unlike snd_pcm_stop(), this affects only the given stream.
1389 * Return: Zero if successful, or a negative error code.
1391 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1393 return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1394 SNDRV_PCM_STATE_SETUP);
1398 * snd_pcm_stop_xrun - stop the running streams as XRUN
1399 * @substream: the PCM substream instance
1401 * This stops the given running substream (and all linked substreams) as XRUN.
1402 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1404 * Return: Zero if successful, or a negative error code.
1406 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1408 unsigned long flags;
/* take the stream lock ourselves; safe to call from any context */
1410 snd_pcm_stream_lock_irqsave(substream, flags);
1411 if (substream->runtime && snd_pcm_running(substream))
1412 __snd_pcm_xrun(substream);
1413 snd_pcm_stream_unlock_irqrestore(substream, flags);
1416 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
/*
 * Pause-action callbacks.  The action argument is "push": non-zero to
 * pause (RUNNING -> PAUSED), zero to release (PAUSED -> RUNNING).
 * Requires hardware PAUSE support (SNDRV_PCM_INFO_PAUSE).
 */
1421 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
1423 struct snd_pcm_runtime *runtime = substream->runtime;
1424 if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1427 if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1429 } else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1431 runtime->trigger_master = substream;
1435 static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
1437 if (substream->runtime->trigger_master != substream)
1439 /* some drivers might use hw_ptr to recover from the pause -
1440 update the hw_ptr now */
1442 snd_pcm_update_hw_ptr(substream);
1443 /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
1444 * a delta between the current jiffies, this gives a large enough
1445 * delta, effectively to skip the check once.
1447 substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1448 return substream->ops->trigger(substream,
1449 push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1450 SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
/* undo issues the opposite trigger of the one just attempted */
1453 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
1455 if (substream->runtime->trigger_master == substream)
1456 substream->ops->trigger(substream,
1457 push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1458 SNDRV_PCM_TRIGGER_PAUSE_PUSH);
/* post-pause: flip state, notify timer; on pause also wake sleepers */
1461 static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
1463 struct snd_pcm_runtime *runtime = substream->runtime;
1464 snd_pcm_trigger_tstamp(substream);
1466 runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1467 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1468 wake_up(&runtime->sleep);
1469 wake_up(&runtime->tsleep);
1471 runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1472 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1476 static const struct action_ops snd_pcm_action_pause = {
1477 .pre_action = snd_pcm_pre_pause,
1478 .do_action = snd_pcm_do_pause,
1479 .undo_action = snd_pcm_undo_pause,
1480 .post_action = snd_pcm_post_pause
1484 * Push/release the pause for all linked streams.
1486 static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1488 return snd_pcm_action(&snd_pcm_action_pause, substream, push);
/*
 * Suspend-action callbacks (CONFIG_PM path).  Streams in unresumable
 * states (OPEN/SETUP/DISCONNECTED) are skipped; already-suspended streams
 * are left alone.
 */
1494 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1496 struct snd_pcm_runtime *runtime = substream->runtime;
1497 switch (runtime->status->state) {
1498 case SNDRV_PCM_STATE_SUSPENDED:
1500 /* unresumable PCM state; return -EBUSY for skipping suspend */
1501 case SNDRV_PCM_STATE_OPEN:
1502 case SNDRV_PCM_STATE_SETUP:
1503 case SNDRV_PCM_STATE_DISCONNECTED:
1506 runtime->trigger_master = substream;
/* issue SUSPEND trigger only when actually running */
1510 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
1512 struct snd_pcm_runtime *runtime = substream->runtime;
1513 if (runtime->trigger_master != substream)
1515 if (! snd_pcm_running(substream))
1517 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1518 return 0; /* suspend unconditionally */
/* remember the pre-suspend state so resume can restore it */
1521 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
1523 struct snd_pcm_runtime *runtime = substream->runtime;
1524 snd_pcm_trigger_tstamp(substream);
1525 runtime->status->suspended_state = runtime->status->state;
1526 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1527 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1528 wake_up(&runtime->sleep);
1529 wake_up(&runtime->tsleep);
1532 static const struct action_ops snd_pcm_action_suspend = {
1533 .pre_action = snd_pcm_pre_suspend,
1534 .do_action = snd_pcm_do_suspend,
1535 .post_action = snd_pcm_post_suspend
1539 * snd_pcm_suspend - trigger SUSPEND to all linked streams
1540 * @substream: the PCM substream
1542 * After this call, all streams are changed to SUSPENDED state.
1544 * Return: Zero if successful, or a negative error code.
1546 static int snd_pcm_suspend(struct snd_pcm_substream *substream)
1549 unsigned long flags;
1551 snd_pcm_stream_lock_irqsave(substream, flags);
1552 err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
1553 snd_pcm_stream_unlock_irqrestore(substream, flags);
1558 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1559 * @pcm: the PCM instance
1561 * After this call, all streams are changed to SUSPENDED state.
1563 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1565 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1567 struct snd_pcm_substream *substream;
1568 int stream, err = 0;
/* walk both directions (playback and capture) of the pcm */
1573 for (stream = 0; stream < 2; stream++) {
1574 for (substream = pcm->streams[stream].substream;
1575 substream; substream = substream->next) {
1576 /* FIXME: the open/close code should lock this as well */
1577 if (substream->runtime == NULL)
1581 * Skip BE dai link PCM's that are internal and may
1582 * not have their substream ops set.
1584 if (!substream->ops)
/* -EBUSY means "unresumable state, skip" (see snd_pcm_pre_suspend) */
1587 err = snd_pcm_suspend(substream);
1588 if (err < 0 && err != -EBUSY)
1594 EXPORT_SYMBOL(snd_pcm_suspend_all);
/*
 * Resume-action callbacks: only meaningful when the hardware advertises
 * SNDRV_PCM_INFO_RESUME; restores the state saved at suspend time.
 */
1598 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
1600 struct snd_pcm_runtime *runtime = substream->runtime;
1601 if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1603 runtime->trigger_master = substream;
1607 static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
1609 struct snd_pcm_runtime *runtime = substream->runtime;
1610 if (runtime->trigger_master != substream)
1612 /* DMA not running previously? */
1613 if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1614 (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1615 substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1617 return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
/* roll a failed resume back to suspended */
1620 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
1622 if (substream->runtime->trigger_master == substream &&
1623 snd_pcm_running(substream))
1624 substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1627 static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
1629 struct snd_pcm_runtime *runtime = substream->runtime;
1630 snd_pcm_trigger_tstamp(substream);
1631 runtime->status->state = runtime->status->suspended_state;
1632 snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1635 static const struct action_ops snd_pcm_action_resume = {
1636 .pre_action = snd_pcm_pre_resume,
1637 .do_action = snd_pcm_do_resume,
1638 .undo_action = snd_pcm_undo_resume,
1639 .post_action = snd_pcm_post_resume
1642 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1644 return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
/* !CONFIG_PM stub follows (original #else branch, elided in this extract) */
1649 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1654 #endif /* CONFIG_PM */
1659 * Change the RUNNING stream(s) to XRUN state.
1661 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1663 struct snd_pcm_runtime *runtime = substream->runtime;
1666 snd_pcm_stream_lock_irq(substream);
1667 switch (runtime->status->state) {
1668 case SNDRV_PCM_STATE_XRUN:
1669 result = 0; /* already there */
1671 case SNDRV_PCM_STATE_RUNNING:
/* transition RUNNING -> XRUN via the common helper */
1672 __snd_pcm_xrun(substream);
1678 snd_pcm_stream_unlock_irq(substream);
/*
 * Reset-action callbacks: allowed only in RUNNING/PREPARED/PAUSED/
 * SUSPENDED states (see the visible case labels).
 */
1685 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
1687 struct snd_pcm_runtime *runtime = substream->runtime;
1688 switch (runtime->status->state) {
1689 case SNDRV_PCM_STATE_RUNNING:
1690 case SNDRV_PCM_STATE_PREPARED:
1691 case SNDRV_PCM_STATE_PAUSED:
1692 case SNDRV_PCM_STATE_SUSPENDED:
/*
 * Ask the driver to reset its position via IOCTL1_RESET, then rebase
 * the hw_ptr bookkeeping to the (possibly updated) hardware pointer.
 */
1699 static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
1701 struct snd_pcm_runtime *runtime = substream->runtime;
1702 int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1705 snd_pcm_stream_lock_irq(substream);
1706 runtime->hw_ptr_base = 0;
/* round the interrupt pointer down to a period boundary */
1707 runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1708 runtime->status->hw_ptr % runtime->period_size;
1709 runtime->silence_start = runtime->status->hw_ptr;
1710 runtime->silence_filled = 0;
1711 snd_pcm_stream_unlock_irq(substream);
/* sync appl_ptr to hw_ptr and re-fill playback silence if configured */
1715 static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
1717 struct snd_pcm_runtime *runtime = substream->runtime;
1718 snd_pcm_stream_lock_irq(substream);
1719 runtime->control->appl_ptr = runtime->status->hw_ptr;
1720 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1721 runtime->silence_size > 0)
1722 snd_pcm_playback_silence(substream, ULONG_MAX);
1723 snd_pcm_stream_unlock_irq(substream);
1726 static const struct action_ops snd_pcm_action_reset = {
1727 .pre_action = snd_pcm_pre_reset,
1728 .do_action = snd_pcm_do_reset,
1729 .post_action = snd_pcm_post_reset
/* reset runs in non-atomic context (driver ioctl may sleep) */
1732 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1734 return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
1740 /* we use the second argument for updating f_flags */
/* prepare is refused in OPEN/DISCONNECTED state or while running */
1741 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1744 struct snd_pcm_runtime *runtime = substream->runtime;
1745 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1746 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1748 if (snd_pcm_running(substream))
1750 substream->f_flags = f_flags;
/* driver prepare, then reuse the reset logic to rebase pointers */
1754 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
1757 err = substream->ops->prepare(substream);
1760 return snd_pcm_do_reset(substream, 0);
1763 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
1765 struct snd_pcm_runtime *runtime = substream->runtime;
1766 runtime->control->appl_ptr = runtime->status->hw_ptr;
1767 snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1770 static const struct action_ops snd_pcm_action_prepare = {
1771 .pre_action = snd_pcm_pre_prepare,
1772 .do_action = snd_pcm_do_prepare,
1773 .post_action = snd_pcm_post_prepare
1777 * snd_pcm_prepare - prepare the PCM substream to be triggerable
1778 * @substream: the PCM substream instance
1779 * @file: file to refer f_flags
1781 * Return: Zero if successful, or a negative error code.
1783 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
/* take f_flags from the opener when available, else the stored ones */
1789 f_flags = file->f_flags;
1791 f_flags = substream->f_flags;
/* resolve PAUSED/SUSPENDED first so prepare starts from a clean state */
1793 snd_pcm_stream_lock_irq(substream);
1794 switch (substream->runtime->status->state) {
1795 case SNDRV_PCM_STATE_PAUSED:
1796 snd_pcm_pause(substream, 0);
1798 case SNDRV_PCM_STATE_SUSPENDED:
1799 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1802 snd_pcm_stream_unlock_irq(substream);
1804 return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1805 substream, f_flags);
/*
 * Drain-init action callbacks: move every linked stream into a state
 * from which draining can proceed (playback -> DRAINING, capture ->
 * DRAINING or SETUP depending on available data).
 */
1812 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1814 struct snd_pcm_runtime *runtime = substream->runtime;
1815 switch (runtime->status->state) {
1816 case SNDRV_PCM_STATE_OPEN:
1817 case SNDRV_PCM_STATE_DISCONNECTED:
1818 case SNDRV_PCM_STATE_SUSPENDED:
1821 runtime->trigger_master = substream;
1825 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
1827 struct snd_pcm_runtime *runtime = substream->runtime;
1828 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1829 switch (runtime->status->state) {
1830 case SNDRV_PCM_STATE_PREPARED:
1831 /* start playback stream if possible */
1832 if (! snd_pcm_playback_empty(substream)) {
1833 snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1834 snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
/* nothing queued: go straight to SETUP */
1836 runtime->status->state = SNDRV_PCM_STATE_SETUP;
1839 case SNDRV_PCM_STATE_RUNNING:
1840 runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1842 case SNDRV_PCM_STATE_XRUN:
1843 runtime->status->state = SNDRV_PCM_STATE_SETUP;
/* capture side: */
1849 /* stop running stream */
1850 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
1851 int new_state = snd_pcm_capture_avail(runtime) > 0 ?
1852 SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
1853 snd_pcm_do_stop(substream, new_state);
1854 snd_pcm_post_stop(substream, new_state);
/* let hardware accelerate draining when it supports DRAIN_TRIGGER */
1858 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
1859 runtime->trigger_master == substream &&
1860 (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
1861 return substream->ops->trigger(substream,
1862 SNDRV_PCM_TRIGGER_DRAIN);
/* no post-action work needed for drain-init */
1867 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
1871 static const struct action_ops snd_pcm_action_drain_init = {
1872 .pre_action = snd_pcm_pre_drain_init,
1873 .do_action = snd_pcm_do_drain_init,
1874 .post_action = snd_pcm_post_drain_init
1878 * Drain the stream(s).
1879 * When the substream is linked, sync until the draining of all playback streams
1881 * After this call, all streams are supposed to be either SETUP or DRAINING
1882 * (capture only) state.
1884 static int snd_pcm_drain(struct snd_pcm_substream *substream,
1887 struct snd_card *card;
1888 struct snd_pcm_runtime *runtime;
1889 struct snd_pcm_substream *s;
1890 struct snd_pcm_group *group;
1891 wait_queue_entry_t wait;
1895 card = substream->pcm->card;
1896 runtime = substream->runtime;
1898 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
/* honor O_NONBLOCK from the caller's file, or the stored flags */
1902 if (file->f_flags & O_NONBLOCK)
1904 } else if (substream->f_flags & O_NONBLOCK)
1907 snd_pcm_stream_lock_irq(substream);
/* resume pause before draining */
1909 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
1910 snd_pcm_pause(substream, 0);
1912 /* pre-start/stop - all running streams are changed to DRAINING state */
1913 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1916 /* in non-blocking, we don't wait in ioctl but let caller poll */
/* blocking path: loop until every linked playback stream left DRAINING */
1924 struct snd_pcm_runtime *to_check;
1925 if (signal_pending(current)) {
1926 result = -ERESTARTSYS;
1929 /* find a substream to drain */
1931 group = snd_pcm_stream_group_ref(substream);
1932 snd_pcm_group_for_each_entry(s, substream) {
1933 if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
1935 runtime = s->runtime;
1936 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
1941 snd_pcm_group_unref(group, substream);
1943 break; /* all drained */
/* sleep on the still-draining stream's wait queue */
1944 init_waitqueue_entry(&wait, current);
1945 set_current_state(TASK_INTERRUPTIBLE);
1946 add_wait_queue(&to_check->sleep, &wait);
1947 snd_pcm_stream_unlock_irq(substream);
1948 if (runtime->no_period_wakeup)
1949 tout = MAX_SCHEDULE_TIMEOUT;
/* otherwise wait at least two periods (in seconds, then jiffies) */
1952 if (runtime->rate) {
1953 long t = runtime->period_size * 2 / runtime->rate;
1954 tout = max(t, tout);
1956 tout = msecs_to_jiffies(tout * 1000);
1958 tout = schedule_timeout(tout);
1960 snd_pcm_stream_lock_irq(substream);
/* remove ourselves only if to_check is still a group member */
1961 group = snd_pcm_stream_group_ref(substream);
1962 snd_pcm_group_for_each_entry(s, substream) {
1963 if (s->runtime == to_check) {
1964 remove_wait_queue(&to_check->sleep, &wait);
1968 snd_pcm_group_unref(group, substream);
1970 if (card->shutdown) {
1975 if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
/* timeout with no progress: assume stalled DMA/IRQ and force-stop */
1978 dev_dbg(substream->pcm->card->dev,
1979 "playback drain error (DMA or IRQ trouble?)\n");
1980 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1988 snd_pcm_stream_unlock_irq(substream);
1996 * Immediately put all linked substreams into SETUP state.
1998 static int snd_pcm_drop(struct snd_pcm_substream *substream)
2000 struct snd_pcm_runtime *runtime;
2003 if (PCM_RUNTIME_CHECK(substream))
2005 runtime = substream->runtime;
2007 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2008 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
2011 snd_pcm_stream_lock_irq(substream);
/* release pause first, then stop unconditionally */
2013 if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
2014 snd_pcm_pause(substream, 0);
2016 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2017 /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
2018 snd_pcm_stream_unlock_irq(substream);
/*
 * is_pcm_file - check whether @file refers to an ALSA PCM character device
 * (playback or capture minor).  Looks up the minor and drops the card
 * reference taken by the lookup before returning.
 */
2024 static bool is_pcm_file(struct file *file)
2026 struct inode *inode = file_inode(file);
2027 struct snd_pcm *pcm;
2030 if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
2032 minor = iminor(inode);
2033 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
/* not a playback minor: try the capture device type */
2035 pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2038 snd_card_unref(pcm->card);
/*
 * snd_pcm_link - link this substream with the substream behind file
 * descriptor @fd so that triggers act on both.  A new group is allocated
 * up front and used only if this substream is not yet linked.
 */
2045 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
2048 struct snd_pcm_file *pcm_file;
2049 struct snd_pcm_substream *substream1;
2050 struct snd_pcm_group *group, *target_group;
2051 bool nonatomic = substream->pcm->nonatomic;
2052 struct fd f = fdget(fd);
2056 if (!is_pcm_file(f.file)) {
2060 pcm_file = f.file->private_data;
2061 substream1 = pcm_file->substream;
/* linking a stream to itself is rejected */
2063 if (substream == substream1) {
/* speculative allocation before taking the rwsem */
2068 group = kzalloc(sizeof(*group), GFP_KERNEL);
2073 snd_pcm_group_init(group);
2075 down_write(&snd_pcm_link_rwsem);
/* both streams must be in the same (non-OPEN) state and same atomicity */
2076 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2077 substream->runtime->status->state != substream1->runtime->status->state ||
2078 substream->pcm->nonatomic != substream1->pcm->nonatomic) {
/* the target must not already belong to another group */
2082 if (snd_pcm_stream_linked(substream1)) {
2087 snd_pcm_stream_lock_irq(substream);
2088 if (!snd_pcm_stream_linked(substream)) {
2089 snd_pcm_group_assign(substream, group);
2090 group = NULL; /* assigned, don't free this one below */
2092 target_group = substream->group;
2093 snd_pcm_stream_unlock_irq(substream);
/* attach substream1 to the (possibly pre-existing) target group */
2095 snd_pcm_group_lock_irq(target_group, nonatomic);
2096 snd_pcm_stream_lock_nested(substream1);
2097 snd_pcm_group_assign(substream1, target_group);
2098 refcount_inc(&target_group->refs);
2099 snd_pcm_stream_unlock(substream1);
2100 snd_pcm_group_unlock_irq(target_group, nonatomic);
2102 up_write(&snd_pcm_link_rwsem);
/* Re-attach a substream to its own self_group (detach from a link group). */
2110 static void relink_to_local(struct snd_pcm_substream *substream)
2112 snd_pcm_stream_lock_nested(substream);
2113 snd_pcm_group_assign(substream, &substream->self_group);
2114 snd_pcm_stream_unlock(substream);
/*
 * snd_pcm_unlink - detach @substream from its link group.  If the group
 * would be left with a single member, that member is detached as well and
 * the group is freed once its refcount drops to zero.
 */
2117 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2119 struct snd_pcm_group *group;
2120 bool nonatomic = substream->pcm->nonatomic;
2121 bool do_free = false;
2124 down_write(&snd_pcm_link_rwsem);
2126 if (!snd_pcm_stream_linked(substream)) {
2131 group = substream->group;
2132 snd_pcm_group_lock_irq(group, nonatomic);
2134 relink_to_local(substream);
2135 refcount_dec(&group->refs);
2137 /* detach the last stream, too */
2138 if (list_is_singular(&group->substreams)) {
2139 relink_to_local(list_first_entry(&group->substreams,
2140 struct snd_pcm_substream,
/* last reference gone -> caller frees the group below */
2142 do_free = refcount_dec_and_test(&group->refs);
2145 snd_pcm_group_unlock_irq(group, nonatomic);
2150 up_write(&snd_pcm_link_rwsem);
2157 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2158 struct snd_pcm_hw_rule *rule)
2160 struct snd_interval t;
2161 snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2162 hw_param_interval_c(params, rule->deps[1]), &t);
2163 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2166 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2167 struct snd_pcm_hw_rule *rule)
2169 struct snd_interval t;
2170 snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2171 hw_param_interval_c(params, rule->deps[1]), &t);
2172 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2175 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2176 struct snd_pcm_hw_rule *rule)
2178 struct snd_interval t;
2179 snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2180 hw_param_interval_c(params, rule->deps[1]),
2181 (unsigned long) rule->private, &t);
2182 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2185 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2186 struct snd_pcm_hw_rule *rule)
2188 struct snd_interval t;
2189 snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2190 (unsigned long) rule->private,
2191 hw_param_interval_c(params, rule->deps[1]), &t);
2192 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
/*
 * hw-param rule: restrict the FORMAT mask to formats whose physical
 * sample width falls inside the SAMPLE_BITS interval (deps[0]).
 */
2195 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2196 struct snd_pcm_hw_rule *rule)
2199 const struct snd_interval *i =
2200 hw_param_interval_c(params, rule->deps[0]);
2202 struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2204 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2206 if (! snd_mask_test(mask, k))
2208 bits = snd_pcm_format_physical_width(k);
2210 continue; /* ignore invalid formats */
/* drop formats whose width lies outside the allowed bits interval */
2211 if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2212 snd_mask_reset(&m, k);
2214 return snd_mask_refine(mask, &m);
/*
 * hw-param rule: shrink the SAMPLE_BITS interval to the min/max physical
 * widths of the formats still present in the FORMAT mask.
 */
2217 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2218 struct snd_pcm_hw_rule *rule)
2220 struct snd_interval t;
2226 for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
2228 if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2230 bits = snd_pcm_format_physical_width(k);
2232 continue; /* ignore invalid formats */
/* widen t to cover this format's physical width */
2233 if (t.min > (unsigned)bits)
2235 if (t.max < (unsigned)bits)
2239 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
/* table order must track the SNDRV_PCM_RATE_* bit layout, hence the guard */
2242 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2243 #error "Change this table"
2246 static const unsigned int rates[] = {
2247 5512, 8000, 11025, 16000, 22050, 32000, 44100,
2248 48000, 64000, 88200, 96000, 176400, 192000, 352800, 384000
2251 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2252 .count = ARRAY_SIZE(rates),
/*
 * hw-param rule: constrain RATE to the known-rate list filtered by the
 * hardware's supported-rate bitmask (rule->private points at the hw).
 */
2256 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2257 struct snd_pcm_hw_rule *rule)
2259 struct snd_pcm_hardware *hw = rule->private;
2260 return snd_interval_list(hw_param_interval(params, rule->var),
2261 snd_pcm_known_rates.count,
2262 snd_pcm_known_rates.list, hw->rates);
/*
 * hw-param rule: cap BUFFER_BYTES at the substream's buffer_bytes_max
 * (rule->private points at the substream).
 */
2265 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2266 struct snd_pcm_hw_rule *rule)
2268 struct snd_interval t;
2269 struct snd_pcm_substream *substream = rule->private;
2271 t.max = substream->buffer_bytes_max;
2275 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
/*
 * snd_pcm_hw_constraints_init - install the generic hw-parameter rules
 * that tie all PCM parameters together (format <-> bits, frame bits,
 * period/buffer size/time/bytes, rate).  Called at substream open.
 */
2278 int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2280 struct snd_pcm_runtime *runtime = substream->runtime;
2281 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
/* start from fully-open masks and intervals */
2284 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2285 snd_mask_any(constrs_mask(constrs, k));
2288 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2289 snd_interval_any(constrs_interval(constrs, k));
/* these parameters are integer-valued by definition */
2292 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2293 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2294 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2295 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2296 snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
/* FORMAT <-> SAMPLE_BITS coupling */
2298 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2299 snd_pcm_hw_rule_format, NULL,
2300 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2303 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2304 snd_pcm_hw_rule_sample_bits, NULL,
2305 SNDRV_PCM_HW_PARAM_FORMAT,
2306 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
/* sample_bits = frame_bits / channels */
2309 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2310 snd_pcm_hw_rule_div, NULL,
2311 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
/* frame_bits = sample_bits * channels = 8*period_bytes/period_size = ... */
2314 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2315 snd_pcm_hw_rule_mul, NULL,
2316 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2319 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2320 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2321 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2324 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2325 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2326 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
/* channels = frame_bits / sample_bits */
2329 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2330 snd_pcm_hw_rule_div, NULL,
2331 SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
/* rate = size * 1000000 / time (period and buffer variants) */
2334 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2335 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2336 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2339 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2340 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2341 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
/* periods = buffer_size / period_size */
2344 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2345 snd_pcm_hw_rule_div, NULL,
2346 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
/* period_size from buffer_size/periods, period_bytes, or period_time */
2349 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2350 snd_pcm_hw_rule_div, NULL,
2351 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2354 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2355 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2356 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2359 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2360 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2361 SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
/* buffer_size from period_size*periods, buffer_bytes, or buffer_time */
2364 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2365 snd_pcm_hw_rule_mul, NULL,
2366 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2369 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2370 snd_pcm_hw_rule_mulkdiv, (void*) 8,
2371 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2374 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2375 snd_pcm_hw_rule_muldivk, (void*) 1000000,
2376 SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
/* bytes = size * frame_bits / 8 (period and buffer variants) */
2379 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2380 snd_pcm_hw_rule_muldivk, (void*) 8,
2381 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2384 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2385 snd_pcm_hw_rule_muldivk, (void*) 8,
2386 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
/* time = size * 1000000 / rate (period and buffer variants) */
2389 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2390 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2391 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2394 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2395 snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2396 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
/*
 * snd_pcm_hw_constraints_complete - apply the hardware-specific limits
 * from runtime->hw (access modes, formats, channels, rates, buffer and
 * period sizes) on top of the generic rules installed at open time.
 */
2402 int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2404 struct snd_pcm_runtime *runtime = substream->runtime;
2405 struct snd_pcm_hardware *hw = &runtime->hw;
2407 unsigned int mask = 0;
/* build the ACCESS mask from the hw info flags; mmap modes only when
 * mmap is actually supported for this substream */
2409 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2410 mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
2411 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2412 mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
2413 if (hw_support_mmap(substream)) {
2414 if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2415 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
2416 if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2417 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
2418 if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2419 mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
2421 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2425 err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2429 err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
2433 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2434 hw->channels_min, hw->channels_max);
2438 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2439 hw->rate_min, hw->rate_max);
2443 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2444 hw->period_bytes_min, hw->period_bytes_max);
2448 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2449 hw->periods_min, hw->periods_max);
2453 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2454 hw->period_bytes_min, hw->buffer_bytes_max);
2458 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2459 snd_pcm_hw_rule_buffer_bytes_max, substream,
2460 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
/* pre-allocated DMA buffer additionally caps the buffer size */
2465 if (runtime->dma_bytes) {
2466 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
/* discrete-rate hardware: restrict to the known-rates list */
2471 if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2472 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2473 snd_pcm_hw_rule_rate, hw,
2474 SNDRV_PCM_HW_PARAM_RATE, -1);
2479 /* FIXME: this belong to lowlevel */
2480 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
/* Default pcm_release hook: detach this substream from its link group. */
2485 static void pcm_release_private(struct snd_pcm_substream *substream)
2487 if (snd_pcm_stream_linked(substream))
2488 snd_pcm_unlink(substream);
/*
 * snd_pcm_release_substream - drop one reference to an opened substream.
 * On the last reference: stop the stream, free hw resources, call the
 * driver close callback, remove the PM-QoS latency request, run the
 * pcm_release hook and detach the substream from the device.
 */
2491 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2493 substream->ref_count--;
2494 if (substream->ref_count > 0)
2497 snd_pcm_drop(substream);
2498 if (substream->hw_opened) {
/* hw_free is skipped when the stream never left the OPEN state
 * (i.e. hw_params was never applied). */
2499 if (substream->ops->hw_free &&
2500 substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2501 substream->ops->hw_free(substream);
2502 substream->ops->close(substream);
2503 substream->hw_opened = 0;
2505 if (pm_qos_request_active(&substream->latency_pm_qos_req))
2506 pm_qos_remove_request(&substream->latency_pm_qos_req);
/* One-shot release hook; cleared so it cannot run twice. */
2507 if (substream->pcm_release) {
2508 substream->pcm_release(substream);
2509 substream->pcm_release = NULL;
2511 snd_pcm_detach_substream(substream);
2513 EXPORT_SYMBOL(snd_pcm_release_substream);
/*
 * snd_pcm_open_substream - attach and initialize a substream for the given
 * stream direction.  If the substream is already open (shared open) only
 * the pointer is returned; otherwise the hw constraints are initialized,
 * the driver's open callback is invoked, and the generic constraint set is
 * completed.  Any error path releases the substream again.
 */
2515 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2517 struct snd_pcm_substream **rsubstream)
2519 struct snd_pcm_substream *substream;
2522 err = snd_pcm_attach_substream(pcm, stream, file, &substream);
/* Already opened by someone else: just hand out the shared substream. */
2525 if (substream->ref_count > 1) {
2526 *rsubstream = substream;
2530 err = snd_pcm_hw_constraints_init(substream);
2532 pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2536 if ((err = substream->ops->open(substream)) < 0)
/* Mark hw opened only after the driver open succeeded, so release knows
 * whether to call close/hw_free. */
2539 substream->hw_opened = 1;
2541 err = snd_pcm_hw_constraints_complete(substream);
2543 pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2547 *rsubstream = substream;
2551 snd_pcm_release_substream(substream);
2554 EXPORT_SYMBOL(snd_pcm_open_substream);
/*
 * Open a substream and bind it to the struct file via a freshly allocated
 * snd_pcm_file stored in file->private_data.  The first opener installs
 * pcm_release_private as the release hook (unlink-on-close).
 */
2556 static int snd_pcm_open_file(struct file *file,
2557 struct snd_pcm *pcm,
2560 struct snd_pcm_file *pcm_file;
2561 struct snd_pcm_substream *substream;
2564 err = snd_pcm_open_substream(pcm, stream, file, &substream);
2568 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
/* Allocation failure must undo the substream reference taken above. */
2569 if (pcm_file == NULL) {
2570 snd_pcm_release_substream(substream);
2573 pcm_file->substream = substream;
2574 if (substream->ref_count == 1)
2575 substream->pcm_release = pcm_release_private;
2576 file->private_data = pcm_file;
/*
 * Character-device open entry points.  Each resolves the PCM device from
 * the minor number for its direction, delegates to snd_pcm_open(), and
 * drops the card reference taken by snd_lookup_minor_data().
 */
2581 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2583 struct snd_pcm *pcm;
2584 int err = nonseekable_open(inode, file);
2587 pcm = snd_lookup_minor_data(iminor(inode),
2588 SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2589 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2591 snd_card_unref(pcm->card);
/* Capture twin of the above; differs only in device type and direction. */
2595 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2597 struct snd_pcm *pcm;
2598 int err = nonseekable_open(inode, file);
2601 pcm = snd_lookup_minor_data(iminor(inode),
2602 SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2603 err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2605 snd_card_unref(pcm->card);
/*
 * Core open path shared by playback and capture: registers the file with
 * the card, pins the card's module, then tries snd_pcm_open_file() under
 * open_mutex.  If all substreams are busy (-EAGAIN) and the file is
 * blocking, sleep on pcm->open_wait and retry until success, card
 * shutdown, or a pending signal.  Error paths unwind the module and card
 * file references.
 */
2609 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2612 wait_queue_entry_t wait;
2618 err = snd_card_file_add(pcm->card, file);
2621 if (!try_module_get(pcm->card->module)) {
2625 init_waitqueue_entry(&wait, current);
2626 add_wait_queue(&pcm->open_wait, &wait);
2627 mutex_lock(&pcm->open_mutex);
2629 err = snd_pcm_open_file(file, pcm, stream);
2632 if (err == -EAGAIN) {
/* Non-blocking open gives up immediately when no substream is free. */
2633 if (file->f_flags & O_NONBLOCK) {
2639 set_current_state(TASK_INTERRUPTIBLE);
/* Drop the mutex while sleeping so a concurrent release can proceed
 * and wake us via pcm->open_wait. */
2640 mutex_unlock(&pcm->open_mutex);
2642 mutex_lock(&pcm->open_mutex);
2643 if (pcm->card->shutdown) {
2647 if (signal_pending(current)) {
2652 remove_wait_queue(&pcm->open_wait, &wait);
2653 mutex_unlock(&pcm->open_mutex);
2659 module_put(pcm->card->module);
2661 snd_card_file_remove(pcm->card, file);
/*
 * Character-device release: drop the substream reference under open_mutex,
 * wake any opener blocked in snd_pcm_open(), and undo the module/card-file
 * references taken at open time.
 */
2666 static int snd_pcm_release(struct inode *inode, struct file *file)
2668 struct snd_pcm *pcm;
2669 struct snd_pcm_substream *substream;
2670 struct snd_pcm_file *pcm_file;
2672 pcm_file = file->private_data;
2673 substream = pcm_file->substream;
2674 if (snd_BUG_ON(!substream))
2676 pcm = substream->pcm;
2677 mutex_lock(&pcm->open_mutex);
2678 snd_pcm_release_substream(substream);
2680 mutex_unlock(&pcm->open_mutex);
/* A substream may have become free — wake blocked openers. */
2681 wake_up(&pcm->open_wait);
2682 module_put(pcm->card->module);
2683 snd_card_file_remove(pcm->card, file);
2687 /* check and update PCM state; return 0 or a negative error
2688 * call this inside PCM lock
/*
 * In RUNNING (and playback DRAINING) the hardware pointer is refreshed;
 * other states either succeed trivially (PREPARED/PAUSED) or map to an
 * error (SUSPENDED, XRUN, default).
 */
2690 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2692 switch (substream->runtime->status->state) {
2693 case SNDRV_PCM_STATE_DRAINING:
/* Capture draining is terminal for hwsync; playback draining falls
 * through to the RUNNING handling. */
2694 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2697 case SNDRV_PCM_STATE_RUNNING:
2698 return snd_pcm_update_hw_ptr(substream);
2699 case SNDRV_PCM_STATE_PREPARED:
2700 case SNDRV_PCM_STATE_PAUSED:
2702 case SNDRV_PCM_STATE_SUSPENDED:
2704 case SNDRV_PCM_STATE_XRUN:
2711 /* increase the appl_ptr; returns the processed frames or a negative error */
2712 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2713 snd_pcm_uframes_t frames,
2714 snd_pcm_sframes_t avail)
2716 struct snd_pcm_runtime *runtime = substream->runtime;
2717 snd_pcm_sframes_t appl_ptr;
/* Clamp the request to the currently available room. */
2722 if (frames > (snd_pcm_uframes_t)avail)
2724 appl_ptr = runtime->control->appl_ptr + frames;
/* Wrap at the ring-buffer boundary. */
2725 if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2726 appl_ptr -= runtime->boundary;
2727 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2728 return ret < 0 ? ret : frames;
2731 /* decrease the appl_ptr; returns the processed frames or zero for error */
2732 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2733 snd_pcm_uframes_t frames,
2734 snd_pcm_sframes_t avail)
2736 struct snd_pcm_runtime *runtime = substream->runtime;
2737 snd_pcm_sframes_t appl_ptr;
2742 if (frames > (snd_pcm_uframes_t)avail)
2744 appl_ptr = runtime->control->appl_ptr - frames;
/* Underflow across the boundary wraps backwards. */
2746 appl_ptr += runtime->boundary;
2747 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2748 /* NOTE: we return zero for errors because PulseAudio gets depressed
2749 * upon receiving an error from rewind ioctl and stops processing
2750 * any longer. Returning zero means that no rewind is done, so
2751 * it's not absolutely wrong to answer like that.
2753 return ret < 0 ? 0 : frames;
/*
 * snd_pcm_rewind - move appl_ptr backwards by up to @frames, bounded by
 * the hardware-side available frames.  Runs under the stream lock after a
 * state check/hw-pointer refresh.
 */
2756 static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
2757 snd_pcm_uframes_t frames)
2759 snd_pcm_sframes_t ret;
2764 snd_pcm_stream_lock_irq(substream);
2765 ret = do_pcm_hwsync(substream);
2767 ret = rewind_appl_ptr(substream, frames,
2768 snd_pcm_hw_avail(substream));
2769 snd_pcm_stream_unlock_irq(substream);
/*
 * snd_pcm_forward - move appl_ptr forwards by up to @frames, bounded by
 * the application-side available frames.  Same locking pattern as rewind.
 */
2773 static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
2774 snd_pcm_uframes_t frames)
2776 snd_pcm_sframes_t ret;
2781 snd_pcm_stream_lock_irq(substream);
2782 ret = do_pcm_hwsync(substream);
2784 ret = forward_appl_ptr(substream, frames,
2785 snd_pcm_avail(substream));
2786 snd_pcm_stream_unlock_irq(substream);
/* Plain hwsync: state check + hw pointer update under the stream lock. */
2790 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2794 snd_pcm_stream_lock_irq(substream);
2795 err = do_pcm_hwsync(substream);
2796 snd_pcm_stream_unlock_irq(substream);
/*
 * Compute the current stream delay (in frames) after syncing the hardware
 * pointer; result is written to *delay under the stream lock.
 */
2800 static int snd_pcm_delay(struct snd_pcm_substream *substream,
2801 snd_pcm_sframes_t *delay)
2804 snd_pcm_sframes_t n = 0;
2806 snd_pcm_stream_lock_irq(substream);
2807 err = do_pcm_hwsync(substream);
2809 n = snd_pcm_calc_delay(substream);
2810 snd_pcm_stream_unlock_irq(substream);
/*
 * SNDRV_PCM_IOCTL_SYNC_PTR handler: exchange the mmap status/control
 * records with user space without requiring an actual mmap.  Flags select
 * the direction: absent SYNC_PTR_APPL the kernel *applies* the user's
 * appl_ptr, otherwise it reports its own; likewise for avail_min.
 */
2816 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2817 struct snd_pcm_sync_ptr __user *_sync_ptr)
2819 struct snd_pcm_runtime *runtime = substream->runtime;
2820 struct snd_pcm_sync_ptr sync_ptr;
2821 volatile struct snd_pcm_mmap_status *status;
2822 volatile struct snd_pcm_mmap_control *control;
2825 memset(&sync_ptr, 0, sizeof(sync_ptr));
2826 if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2828 if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2830 status = runtime->status;
2831 control = runtime->control;
/* Optional hw-pointer refresh before the exchange. */
2832 if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2833 err = snd_pcm_hwsync(substream);
2837 snd_pcm_stream_lock_irq(substream);
2838 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2839 err = pcm_lib_apply_appl_ptr(substream,
2840 sync_ptr.c.control.appl_ptr);
2842 snd_pcm_stream_unlock_irq(substream);
2846 sync_ptr.c.control.appl_ptr = control->appl_ptr;
2848 if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
2849 control->avail_min = sync_ptr.c.control.avail_min;
2851 sync_ptr.c.control.avail_min = control->avail_min;
/* Snapshot the full status record for the caller, still under lock. */
2852 sync_ptr.s.status.state = status->state;
2853 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2854 sync_ptr.s.status.tstamp = status->tstamp;
2855 sync_ptr.s.status.suspended_state = status->suspended_state;
2856 sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
2857 snd_pcm_stream_unlock_irq(substream);
2858 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
/*
 * SNDRV_PCM_IOCTL_TTSTAMP handler: select the timestamp clock type after
 * validating the value against the known range.
 */
2863 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
2865 struct snd_pcm_runtime *runtime = substream->runtime;
2868 if (get_user(arg, _arg))
2870 if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
2872 runtime->tstamp_type = arg;
/*
 * Interleaved read/write ioctl: copies the snd_xferi descriptor from user
 * space, performs the transfer via snd_pcm_lib_write/read, and stores the
 * frame count (or error) back into _xferi->result.
 */
2876 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
2877 struct snd_xferi __user *_xferi)
2879 struct snd_xferi xferi;
2880 struct snd_pcm_runtime *runtime = substream->runtime;
2881 snd_pcm_sframes_t result;
2883 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
/* Pre-clear the result so user space never reads stale data on error. */
2885 if (put_user(0, &_xferi->result))
2887 if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
2889 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2890 result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames)
2892 result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
2893 __put_user(result, &_xferi->result);
2894 return result < 0 ? result : 0;
/*
 * Non-interleaved variant: duplicates the per-channel buffer pointer array
 * from user space (bounded by a 128-channel sanity limit) before the
 * vectored transfer.
 */
2897 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
2898 struct snd_xfern __user *_xfern)
2900 struct snd_xfern xfern;
2901 struct snd_pcm_runtime *runtime = substream->runtime;
2903 snd_pcm_sframes_t result;
2905 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2907 if (runtime->channels > 128)
2909 if (put_user(0, &_xfern->result))
2911 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2914 bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
2916 return PTR_ERR(bufs);
2917 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2918 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2920 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2922 __put_user(result, &_xfern->result);
2923 return result < 0 ? result : 0;
/*
 * REWIND/FORWARD ioctl thin wrappers: read the frame count from user
 * space, pre-clear the result slot, perform the pointer move, and write
 * the actually processed frame count back through the same pointer.
 */
2926 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
2927 snd_pcm_uframes_t __user *_frames)
2929 snd_pcm_uframes_t frames;
2930 snd_pcm_sframes_t result;
2932 if (get_user(frames, _frames))
2934 if (put_user(0, _frames))
2936 result = snd_pcm_rewind(substream, frames);
2937 __put_user(result, _frames);
2938 return result < 0 ? result : 0;
2941 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
2942 snd_pcm_uframes_t __user *_frames)
2944 snd_pcm_uframes_t frames;
2945 snd_pcm_sframes_t result;
2947 if (get_user(frames, _frames))
2949 if (put_user(0, _frames))
2951 result = snd_pcm_forward(substream, frames);
2952 __put_user(result, _frames);
2953 return result < 0 ? result : 0;
/*
 * Central ioctl dispatcher shared by playback and capture (and by the
 * compat layer).  Waits for the card to reach full power, then routes
 * each SNDRV_PCM_IOCTL_* command to its handler.  Unknown commands log
 * and fall through to the error return below the switch.
 */
2956 static int snd_pcm_common_ioctl(struct file *file,
2957 struct snd_pcm_substream *substream,
2958 unsigned int cmd, void __user *arg)
2960 struct snd_pcm_file *pcm_file = file->private_data;
2963 if (PCM_RUNTIME_CHECK(substream))
2966 res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
2971 case SNDRV_PCM_IOCTL_PVERSION:
2972 return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
2973 case SNDRV_PCM_IOCTL_INFO:
2974 return snd_pcm_info_user(substream, arg);
2975 case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */
2977 case SNDRV_PCM_IOCTL_TTSTAMP:
2978 return snd_pcm_tstamp(substream, arg);
/* User space declares its protocol version; gates e.g. status/control
 * mmap availability (see pcm_status_mmap_allowed()). */
2979 case SNDRV_PCM_IOCTL_USER_PVERSION:
2980 if (get_user(pcm_file->user_pversion,
2981 (unsigned int __user *)arg))
2984 case SNDRV_PCM_IOCTL_HW_REFINE:
2985 return snd_pcm_hw_refine_user(substream, arg);
2986 case SNDRV_PCM_IOCTL_HW_PARAMS:
2987 return snd_pcm_hw_params_user(substream, arg);
2988 case SNDRV_PCM_IOCTL_HW_FREE:
2989 return snd_pcm_hw_free(substream);
2990 case SNDRV_PCM_IOCTL_SW_PARAMS:
2991 return snd_pcm_sw_params_user(substream, arg);
2992 case SNDRV_PCM_IOCTL_STATUS:
2993 return snd_pcm_status_user(substream, arg, false);
2994 case SNDRV_PCM_IOCTL_STATUS_EXT:
2995 return snd_pcm_status_user(substream, arg, true);
2996 case SNDRV_PCM_IOCTL_CHANNEL_INFO:
2997 return snd_pcm_channel_info_user(substream, arg);
2998 case SNDRV_PCM_IOCTL_PREPARE:
2999 return snd_pcm_prepare(substream, file);
3000 case SNDRV_PCM_IOCTL_RESET:
3001 return snd_pcm_reset(substream);
3002 case SNDRV_PCM_IOCTL_START:
3003 return snd_pcm_start_lock_irq(substream);
/* LINK takes the peer's fd encoded directly in the arg pointer. */
3004 case SNDRV_PCM_IOCTL_LINK:
3005 return snd_pcm_link(substream, (int)(unsigned long) arg);
3006 case SNDRV_PCM_IOCTL_UNLINK:
3007 return snd_pcm_unlink(substream);
3008 case SNDRV_PCM_IOCTL_RESUME:
3009 return snd_pcm_resume(substream);
3010 case SNDRV_PCM_IOCTL_XRUN:
3011 return snd_pcm_xrun(substream);
3012 case SNDRV_PCM_IOCTL_HWSYNC:
3013 return snd_pcm_hwsync(substream);
3014 case SNDRV_PCM_IOCTL_DELAY:
3016 snd_pcm_sframes_t delay;
3017 snd_pcm_sframes_t __user *res = arg;
3020 err = snd_pcm_delay(substream, &delay);
3023 if (put_user(delay, res))
3027 case SNDRV_PCM_IOCTL_SYNC_PTR:
3028 return snd_pcm_sync_ptr(substream, arg);
3029 #ifdef CONFIG_SND_SUPPORT_OLD_API
3030 case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
3031 return snd_pcm_hw_refine_old_user(substream, arg);
3032 case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
3033 return snd_pcm_hw_params_old_user(substream, arg);
3035 case SNDRV_PCM_IOCTL_DRAIN:
3036 return snd_pcm_drain(substream, file);
3037 case SNDRV_PCM_IOCTL_DROP:
3038 return snd_pcm_drop(substream);
/* PAUSE carries push(1)/release(0) in the arg value. */
3039 case SNDRV_PCM_IOCTL_PAUSE:
3040 return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
3042 (int)(unsigned long)arg);
3043 case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
3044 case SNDRV_PCM_IOCTL_READI_FRAMES:
3045 return snd_pcm_xferi_frames_ioctl(substream, arg);
3046 case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
3047 case SNDRV_PCM_IOCTL_READN_FRAMES:
3048 return snd_pcm_xfern_frames_ioctl(substream, arg);
3049 case SNDRV_PCM_IOCTL_REWIND:
3050 return snd_pcm_rewind_ioctl(substream, arg);
3051 case SNDRV_PCM_IOCTL_FORWARD:
3052 return snd_pcm_forward_ioctl(substream, arg);
3054 pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
/*
 * unlocked_ioctl entry point: reject non-'A' (non-ALSA) command groups
 * early, then delegate to the common dispatcher.
 */
3058 static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
3061 struct snd_pcm_file *pcm_file;
3063 pcm_file = file->private_data;
3065 if (((cmd >> 8) & 0xff) != 'A')
3068 return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
3069 (void __user *)arg);
3073 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
3074 * @substream: PCM substream
3076 * @arg: IOCTL argument
3078 * The function is provided primarily for OSS layer and USB gadget drivers,
3079 * and it allows only the limited set of ioctls (hw_params, sw_params,
3080 * prepare, start, drain, drop, forward).
/*
 * Unlike snd_pcm_common_ioctl(), @arg is a kernel pointer here — no
 * copy_{from,to}_user is involved.
 */
3082 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3083 unsigned int cmd, void *arg)
3085 snd_pcm_uframes_t *frames = arg;
3086 snd_pcm_sframes_t result;
3089 case SNDRV_PCM_IOCTL_FORWARD:
3091 /* provided only for OSS; capture-only and no value returned */
3092 if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
3094 result = snd_pcm_forward(substream, *frames);
3095 return result < 0 ? result : 0;
3097 case SNDRV_PCM_IOCTL_HW_PARAMS:
3098 return snd_pcm_hw_params(substream, arg);
3099 case SNDRV_PCM_IOCTL_SW_PARAMS:
3100 return snd_pcm_sw_params(substream, arg);
3101 case SNDRV_PCM_IOCTL_PREPARE:
/* NULL file: in-kernel callers have no struct file context. */
3102 return snd_pcm_prepare(substream, NULL);
3103 case SNDRV_PCM_IOCTL_START:
3104 return snd_pcm_start_lock_irq(substream);
3105 case SNDRV_PCM_IOCTL_DRAIN:
3106 return snd_pcm_drain(substream, NULL);
3107 case SNDRV_PCM_IOCTL_DROP:
3108 return snd_pcm_drop(substream);
3109 case SNDRV_PCM_IOCTL_DELAY:
3110 return snd_pcm_delay(substream, frames);
3115 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
/*
 * read(2) entry point: byte count must be frame-aligned; it is converted
 * to frames for snd_pcm_lib_read() and the result converted back to bytes.
 */
3117 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3120 struct snd_pcm_file *pcm_file;
3121 struct snd_pcm_substream *substream;
3122 struct snd_pcm_runtime *runtime;
3123 snd_pcm_sframes_t result;
3125 pcm_file = file->private_data;
3126 substream = pcm_file->substream;
3127 if (PCM_RUNTIME_CHECK(substream))
3129 runtime = substream->runtime;
/* No transfers before hw_params (stream still in OPEN state). */
3130 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3132 if (!frame_aligned(runtime, count))
3134 count = bytes_to_frames(runtime, count);
3135 result = snd_pcm_lib_read(substream, buf, count);
3137 result = frames_to_bytes(runtime, result);
/* write(2) mirror of snd_pcm_read() for the playback direction. */
3141 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3142 size_t count, loff_t * offset)
3144 struct snd_pcm_file *pcm_file;
3145 struct snd_pcm_substream *substream;
3146 struct snd_pcm_runtime *runtime;
3147 snd_pcm_sframes_t result;
3149 pcm_file = file->private_data;
3150 substream = pcm_file->substream;
3151 if (PCM_RUNTIME_CHECK(substream))
3153 runtime = substream->runtime;
3154 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3156 if (!frame_aligned(runtime, count))
3158 count = bytes_to_frames(runtime, count);
3159 result = snd_pcm_lib_write(substream, buf, count);
3161 result = frames_to_bytes(runtime, result);
/*
 * readv(2)/read_iter entry point for non-interleaved capture: one iovec
 * segment per channel.  The segment count must equal runtime->channels
 * and the first segment's length must be frame-aligned; the per-channel
 * base pointers are gathered into a temporary array for
 * snd_pcm_lib_readv().
 */
3165 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3167 struct snd_pcm_file *pcm_file;
3168 struct snd_pcm_substream *substream;
3169 struct snd_pcm_runtime *runtime;
3170 snd_pcm_sframes_t result;
3173 snd_pcm_uframes_t frames;
3175 pcm_file = iocb->ki_filp->private_data;
3176 substream = pcm_file->substream;
3177 if (PCM_RUNTIME_CHECK(substream))
3179 runtime = substream->runtime;
3180 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3182 if (!iter_is_iovec(to))
/* NOTE(review): the read side caps segments at 1024 while the write side
 * below uses 128 — both are sanity bounds, the channels equality check is
 * the effective limit. */
3184 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3186 if (!frame_aligned(runtime, to->iov->iov_len))
3188 frames = bytes_to_samples(runtime, to->iov->iov_len);
3189 bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
3192 for (i = 0; i < to->nr_segs; ++i)
3193 bufs[i] = to->iov[i].iov_base;
3194 result = snd_pcm_lib_readv(substream, bufs, frames);
3196 result = frames_to_bytes(runtime, result);
/* writev(2)/write_iter mirror for non-interleaved playback. */
3201 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3203 struct snd_pcm_file *pcm_file;
3204 struct snd_pcm_substream *substream;
3205 struct snd_pcm_runtime *runtime;
3206 snd_pcm_sframes_t result;
3209 snd_pcm_uframes_t frames;
3211 pcm_file = iocb->ki_filp->private_data;
3212 substream = pcm_file->substream;
3213 if (PCM_RUNTIME_CHECK(substream))
3215 runtime = substream->runtime;
3216 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3218 if (!iter_is_iovec(from))
3220 if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3221 !frame_aligned(runtime, from->iov->iov_len))
3223 frames = bytes_to_samples(runtime, from->iov->iov_len);
3224 bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
3227 for (i = 0; i < from->nr_segs; ++i)
3228 bufs[i] = from->iov[i].iov_base;
3229 result = snd_pcm_lib_writev(substream, bufs, frames);
3231 result = frames_to_bytes(runtime, result);
/*
 * poll(2) entry point.  "ok" is the direction-appropriate ready mask
 * (EPOLLOUT for playback, EPOLLIN for capture); readiness is decided by
 * comparing available frames with avail_min under the stream lock.
 */
3236 static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
3238 struct snd_pcm_file *pcm_file;
3239 struct snd_pcm_substream *substream;
3240 struct snd_pcm_runtime *runtime;
3242 snd_pcm_uframes_t avail;
3244 pcm_file = file->private_data;
3246 substream = pcm_file->substream;
3247 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3248 ok = EPOLLOUT | EPOLLWRNORM;
3250 ok = EPOLLIN | EPOLLRDNORM;
/* A dead runtime still reports "ready" plus EPOLLERR so pollers wake. */
3251 if (PCM_RUNTIME_CHECK(substream))
3252 return ok | EPOLLERR;
3254 runtime = substream->runtime;
3255 poll_wait(file, &runtime->sleep, wait);
3258 snd_pcm_stream_lock_irq(substream);
3259 avail = snd_pcm_avail(substream);
3260 switch (runtime->status->state) {
3261 case SNDRV_PCM_STATE_RUNNING:
3262 case SNDRV_PCM_STATE_PREPARED:
3263 case SNDRV_PCM_STATE_PAUSED:
3264 if (avail >= runtime->control->avail_min)
/* Capture draining still has data to deliver; playback draining does
 * not make the fd ready. */
3267 case SNDRV_PCM_STATE_DRAINING:
3268 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
/* Any other state (XRUN, SUSPENDED, ...) reports ready + error. */
3275 mask = ok | EPOLLERR;
3278 snd_pcm_stream_unlock_irq(substream);
3287 * Only on coherent architectures, we can mmap the status and the control records
3288 * for effcient data transfer. On others, we have to use HWSYNC ioctl...
3290 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3292 * mmap status record
/* Fault handler: hand out the single page backing runtime->status. */
3294 static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3296 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3297 struct snd_pcm_runtime *runtime;
3299 if (substream == NULL)
3300 return VM_FAULT_SIGBUS;
3301 runtime = substream->runtime;
3302 vmf->page = virt_to_page(runtime->status);
/* get_page(): the VM drops the reference when the mapping goes away. */
3303 get_page(vmf->page);
3307 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3309 .fault = snd_pcm_mmap_status_fault,
/*
 * Map the read-only status record; the VMA must be exactly one
 * page-aligned record and readable.
 */
3312 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3313 struct vm_area_struct *area)
3316 if (!(area->vm_flags & VM_READ))
3318 size = area->vm_end - area->vm_start;
3319 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3321 area->vm_ops = &snd_pcm_vm_ops_status;
3322 area->vm_private_data = substream;
3323 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3328 * mmap control record
/* Fault handler: hand out the single page backing runtime->control. */
3330 static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3332 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3333 struct snd_pcm_runtime *runtime;
3335 if (substream == NULL)
3336 return VM_FAULT_SIGBUS;
3337 runtime = substream->runtime;
3338 vmf->page = virt_to_page(runtime->control);
3339 get_page(vmf->page);
3343 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3345 .fault = snd_pcm_mmap_control_fault,
/* Map the control record; same size/flags validation as the status map. */
3348 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3349 struct vm_area_struct *area)
3352 if (!(area->vm_flags & VM_READ))
3354 size = area->vm_end - area->vm_start;
3355 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3357 area->vm_ops = &snd_pcm_vm_ops_control;
3358 area->vm_private_data = substream;
3359 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
/*
 * Policy predicates deciding whether user space may mmap the status or
 * control records for this open (compat restrictions and SYNC_APPLPTR).
 */
3363 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3365 if (pcm_file->no_compat_mmap)
3367 /* See pcm_control_mmap_allowed() below.
3368 * Since older alsa-lib requires both status and control mmaps to be
3369 * coupled, we have to disable the status mmap for old alsa-lib, too.
3371 if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3372 (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3377 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3379 if (pcm_file->no_compat_mmap)
3381 /* Disallow the control mmap when SYNC_APPLPTR flag is set;
3382 * it enforces the user-space to fall back to snd_pcm_sync_ptr(),
3383 * thus it effectively assures the manual update of appl_ptr.
3385 if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3390 #else /* ! coherent mmap */
3392 * don't support mmap for status and control records.
/* Non-coherent architectures: status/control mmap is never offered; the
 * stubs below reject any mapping attempt (user space uses SYNC_PTR). */
3394 #define pcm_status_mmap_allowed(pcm_file) false
3395 #define pcm_control_mmap_allowed(pcm_file) false
3397 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3398 struct vm_area_struct *area)
3402 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3403 struct vm_area_struct *area)
3407 #endif /* coherent mmap */
/* Translate a byte offset within the DMA area into its backing page. */
3409 static inline struct page *
3410 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3412 void *vaddr = substream->runtime->dma_area + ofs;
3413 return virt_to_page(vaddr);
3417 * fault callback for mmapping a RAM page
/*
 * Validates the faulting offset against the (page-aligned) DMA buffer
 * size, then resolves the page either via the driver's page callback or
 * the default linear translation above.
 */
3419 static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3421 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3422 struct snd_pcm_runtime *runtime;
3423 unsigned long offset;
3427 if (substream == NULL)
3428 return VM_FAULT_SIGBUS;
3429 runtime = substream->runtime;
3430 offset = vmf->pgoff << PAGE_SHIFT;
3431 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3432 if (offset > dma_bytes - PAGE_SIZE)
3433 return VM_FAULT_SIGBUS;
3434 if (substream->ops->page)
3435 page = substream->ops->page(substream, offset);
3437 page = snd_pcm_default_page_ops(substream, offset);
3439 return VM_FAULT_SIGBUS;
/* Two vm_ops flavors: with and without the software fault handler (the
 * fault-less variant is used when pages are pre-mapped, e.g. remap). */
3445 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3446 .open = snd_pcm_mmap_data_open,
3447 .close = snd_pcm_mmap_data_close,
3450 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3451 .open = snd_pcm_mmap_data_open,
3452 .close = snd_pcm_mmap_data_close,
3453 .fault = snd_pcm_mmap_data_fault,
3457 * mmap the DMA buffer on RAM
3461 * snd_pcm_lib_default_mmap - Default PCM data mmap function
3462 * @substream: PCM substream
3465 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
3466 * this function is invoked implicitly.
/*
 * Strategy, in order: IRAM buffers are remapped write-combined via
 * remap_pfn_range(); coherent DMA buffers go through dma_mmap_coherent()
 * (skipped on x86 to avoid PAT warnings); everything else falls back to
 * the per-page fault handler.
 */
3468 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3469 struct vm_area_struct *area)
3471 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3472 #ifdef CONFIG_GENERIC_ALLOCATOR
3473 if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
3474 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
3475 return remap_pfn_range(area, area->vm_start,
3476 substream->dma_buffer.addr >> PAGE_SHIFT,
3477 area->vm_end - area->vm_start, area->vm_page_prot);
3479 #endif /* CONFIG_GENERIC_ALLOCATOR */
3480 #ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
3481 if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
3482 (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
3483 substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
3484 return dma_mmap_coherent(substream->dma_buffer.dev.dev,
3486 substream->runtime->dma_area,
3487 substream->runtime->dma_addr,
3488 substream->runtime->dma_bytes);
3489 #endif /* CONFIG_X86 */
3490 /* mmap with fault handler */
3491 area->vm_ops = &snd_pcm_vm_ops_data_fault;
3494 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
3497 * mmap the DMA buffer on I/O memory area
3499 #if SNDRV_PCM_INFO_MMAP_IOMEM
3501 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3502 * @substream: PCM substream
3505 * When your hardware uses the iomapped pages as the hardware buffer and
3506 * wants to mmap it, pass this function as mmap pcm_ops. Note that this
3507 * is supposed to work only on limited architectures.
3509 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3510 struct vm_area_struct *area)
3512 struct snd_pcm_runtime *runtime = substream->runtime;
/* Device memory must be mapped uncached. */
3514 area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3515 return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3517 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
3518 #endif /* SNDRV_PCM_INFO_MMAP */
/*
 * snd_pcm_mmap_data - mmap the PCM data area.  Validates direction-
 * appropriate access flags, stream state, MMAP capability, access mode,
 * and that the requested window fits within the DMA buffer; then defers
 * to the driver's mmap op or the default handler and bumps mmap_count.
 */
3523 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3524 struct vm_area_struct *area)
3526 struct snd_pcm_runtime *runtime;
3528 unsigned long offset;
/* Playback mappings may be write-only or read-write; capture must at
 * least be readable. */
3532 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3533 if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3536 if (!(area->vm_flags & VM_READ))
3539 runtime = substream->runtime;
3540 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3542 if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
/* RW access modes use read/write syscalls, not the mapped buffer. */
3544 if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3545 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3547 size = area->vm_end - area->vm_start;
3548 offset = area->vm_pgoff << PAGE_SHIFT;
3549 dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3550 if ((size_t)size > dma_bytes)
3552 if (offset > dma_bytes - size)
3555 area->vm_ops = &snd_pcm_vm_ops_data;
3556 area->vm_private_data = substream;
3557 if (substream->ops->mmap)
3558 err = substream->ops->mmap(substream, area);
3560 err = snd_pcm_lib_default_mmap(substream, area);
3562 atomic_inc(&substream->mmap_count);
3565 EXPORT_SYMBOL(snd_pcm_mmap_data);
/*
 * mmap(2) entry point: the page offset selects which record is mapped —
 * the magic STATUS/CONTROL offsets (policy-gated) or the data buffer.
 */
3567 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3569 struct snd_pcm_file * pcm_file;
3570 struct snd_pcm_substream *substream;
3571 unsigned long offset;
3573 pcm_file = file->private_data;
3574 substream = pcm_file->substream;
3575 if (PCM_RUNTIME_CHECK(substream))
3578 offset = area->vm_pgoff << PAGE_SHIFT;
3580 case SNDRV_PCM_MMAP_OFFSET_STATUS:
3581 if (!pcm_status_mmap_allowed(pcm_file))
3583 return snd_pcm_mmap_status(substream, file, area);
3584 case SNDRV_PCM_MMAP_OFFSET_CONTROL:
3585 if (!pcm_control_mmap_allowed(pcm_file))
3587 return snd_pcm_mmap_control(substream, file, area);
3589 return snd_pcm_mmap_data(substream, file, area);
/* fasync(2) entry point: register/unregister for SIGIO on the runtime. */
3594 static int snd_pcm_fasync(int fd, struct file * file, int on)
3596 struct snd_pcm_file * pcm_file;
3597 struct snd_pcm_substream *substream;
3598 struct snd_pcm_runtime *runtime;
3600 pcm_file = file->private_data;
3601 substream = pcm_file->substream;
3602 if (PCM_RUNTIME_CHECK(substream))
3604 runtime = substream->runtime;
3605 return fasync_helper(fd, file, on, &runtime->fasync);
3611 #ifdef CONFIG_COMPAT
3612 #include "pcm_compat.c"
3614 #define snd_pcm_ioctl_compat NULL
3618 * To be removed helpers to keep binary compatibility
3621 #ifdef CONFIG_SND_SUPPORT_OLD_API
3622 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
3623 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
/*
 * Convert the legacy hw_params layout into the current one.  The old ABI
 * used 32-bit masks (bits[0] only) and a different rmask/cmask bit
 * numbering, remapped by __OLD_TO_NEW_MASK().
 */
3625 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3626 struct snd_pcm_hw_params_old *oparams)
3630 memset(params, 0, sizeof(*params));
3631 params->flags = oparams->flags;
3632 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3633 params->masks[i].bits[0] = oparams->masks[i];
3634 memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3635 params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3636 params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
3637 params->info = oparams->info;
3638 params->msbits = oparams->msbits;
3639 params->rate_num = oparams->rate_num;
3640 params->rate_den = oparams->rate_den;
3641 params->fifo_size = oparams->fifo_size;
/* Inverse conversion: current layout back to the legacy one.  Mask bits
 * above bits[0] and extra intervals are silently dropped — the old ABI
 * cannot represent them. */
3644 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
3645 struct snd_pcm_hw_params *params)
3649 memset(oparams, 0, sizeof(*oparams));
3650 oparams->flags = params->flags;
3651 for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3652 oparams->masks[i] = params->masks[i].bits[0];
3653 memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
3654 oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
3655 oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
3656 oparams->info = params->info;
3657 oparams->msbits = params->msbits;
3658 oparams->rate_num = params->rate_num;
3659 oparams->rate_den = params->rate_den;
3660 oparams->fifo_size = params->fifo_size;
/*
 * SNDRV_PCM_IOCTL_HW_REFINE_OLD handler: run hw_params refinement for
 * a caller still using the pre-2.0.2 struct snd_pcm_hw_params_old.
 * Converts old->new, refines, converts new->old and copies the result
 * back to user space.
 *
 * NOTE(review): this is an elided view of the function -- the "int err;"
 * declaration, kmalloc failure check, goto cleanup labels and kfree()
 * calls are not visible in this chunk.
 */
static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams)
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;

	/* full-size params is heap-allocated rather than stack-allocated */
	params = kmalloc(sizeof(*params), GFP_KERNEL);

	/* pull the old-layout params in from user space */
	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);

	/* widen to the current layout, then run the normal refine path */
	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_refine(substream, params);

	/* presumably resolves params the caller left unconstrained -- confirm
	 * against fixup_unreferenced_params() elsewhere in this file */
	err = fixup_unreferenced_params(substream, params);

	/* shrink back to the old layout and hand the result to the caller */
	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
/*
 * SNDRV_PCM_IOCTL_HW_PARAMS_OLD handler: apply hw_params for a caller
 * still using the pre-2.0.2 struct snd_pcm_hw_params_old.  Same shape
 * as snd_pcm_hw_refine_old_user() but calls snd_pcm_hw_params().
 *
 * NOTE(review): elided view -- the "int err;" declaration, kmalloc
 * failure check, goto cleanup labels and kfree() calls are not visible
 * in this chunk.
 */
static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams)
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;

	/* full-size params is heap-allocated rather than stack-allocated */
	params = kmalloc(sizeof(*params), GFP_KERNEL);

	/* pull the old-layout params in from user space */
	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);

	/* widen to the current layout, then actually set the hw params */
	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_params(substream, params);

	/* shrink back to the old layout and hand the result to the caller */
	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
#endif /* CONFIG_SND_SUPPORT_OLD_API */
/*
 * get_unmapped_area file operation: translate an mmap page offset
 * into the kernel virtual address of the status/control record or of
 * the DMA data buffer.  Presumably this is the !CONFIG_MMU branch
 * (with the MMU branch defining the hook to NULL below) -- confirm
 * against the #ifdef lines elided from this chunk.
 *
 * NOTE(review): elided view -- the "addr"/"len" parameters, the
 * "switch (offset)" header, the default label and the closing braces
 * are not visible here.
 */
static unsigned long snd_pcm_get_unmapped_area(struct file *file,
					       unsigned long pgoff,
					       unsigned long flags)
	struct snd_pcm_file *pcm_file = file->private_data;
	struct snd_pcm_substream *substream = pcm_file->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	/* byte offset derived from the mmap page offset */
	unsigned long offset = pgoff << PAGE_SHIFT;

	case SNDRV_PCM_MMAP_OFFSET_STATUS:
		/* address of the mmapped status record */
		return (unsigned long)runtime->status;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL:
		/* address of the mmapped control record */
		return (unsigned long)runtime->control;
	/* any other offset indexes into the DMA data area */
	return (unsigned long)runtime->dma_area + offset;

/* alternate branch: no custom hook, the hole is left to the core */
# define snd_pcm_get_unmapped_area NULL
/*
 * File operations for the PCM character devices.  Entry [0] serves
 * playback (write/writev paths, snd_pcm_playback_open); entry [1]
 * serves capture (read/readv paths, snd_pcm_capture_open).  Both
 * share the ioctl/compat-ioctl/mmap/poll/fasync handlers, disallow
 * seeking via no_llseek, and use snd_pcm_get_unmapped_area (NULL on
 * the alternate build branch above).
 *
 * NOTE(review): elided view -- the per-entry braces and the array's
 * closing "};" are past the end of this chunk.
 */
const struct file_operations snd_pcm_f_ops[2] = {
		/* [0]: playback device node */
		.owner = THIS_MODULE,
		.write = snd_pcm_write,
		.write_iter = snd_pcm_writev,
		.open = snd_pcm_playback_open,
		.release = snd_pcm_release,
		.llseek = no_llseek,
		.poll = snd_pcm_poll,
		.unlocked_ioctl = snd_pcm_ioctl,
		.compat_ioctl = snd_pcm_ioctl_compat,
		.mmap = snd_pcm_mmap,
		.fasync = snd_pcm_fasync,
		.get_unmapped_area = snd_pcm_get_unmapped_area,
		/* [1]: capture device node */
		.owner = THIS_MODULE,
		.read = snd_pcm_read,
		.read_iter = snd_pcm_readv,
		.open = snd_pcm_capture_open,
		.release = snd_pcm_release,
		.llseek = no_llseek,
		.poll = snd_pcm_poll,
		.unlocked_ioctl = snd_pcm_ioctl,
		.compat_ioctl = snd_pcm_ioctl_compat,
		.mmap = snd_pcm_mmap,
		.fasync = snd_pcm_fasync,
		.get_unmapped_area = snd_pcm_get_unmapped_area,