1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *  Digital Audio (PCM) abstract layer
4   *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5   */
6  
7  #include <linux/compat.h>
8  #include <linux/mm.h>
9  #include <linux/module.h>
10  #include <linux/file.h>
11  #include <linux/slab.h>
12  #include <linux/sched/signal.h>
13  #include <linux/time.h>
14  #include <linux/pm_qos.h>
15  #include <linux/io.h>
16  #include <linux/dma-mapping.h>
17  #include <linux/vmalloc.h>
18  #include <sound/core.h>
19  #include <sound/control.h>
20  #include <sound/info.h>
21  #include <sound/pcm.h>
22  #include <sound/pcm_params.h>
23  #include <sound/timer.h>
24  #include <sound/minors.h>
25  #include <linux/uio.h>
26  #include <linux/delay.h>
27  
28  #include "pcm_local.h"
29  
30  #ifdef CONFIG_SND_DEBUG
31  #define CREATE_TRACE_POINTS
32  #include "pcm_param_trace.h"
33  #else
34  #define trace_hw_mask_param_enabled()		0
35  #define trace_hw_interval_param_enabled()	0
36  #define trace_hw_mask_param(substream, type, index, prev, curr)
37  #define trace_hw_interval_param(substream, type, index, prev, curr)
38  #endif
39  
40  /*
41   *  Compatibility
42   */
43  
44  struct snd_pcm_hw_params_old {
45  	unsigned int flags;
46  	unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
47  			   SNDRV_PCM_HW_PARAM_ACCESS + 1];
48  	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
49  					SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
50  	unsigned int rmask;
51  	unsigned int cmask;
52  	unsigned int info;
53  	unsigned int msbits;
54  	unsigned int rate_num;
55  	unsigned int rate_den;
56  	snd_pcm_uframes_t fifo_size;
57  	unsigned char reserved[64];
58  };
59  
60  #ifdef CONFIG_SND_SUPPORT_OLD_API
61  #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
62  #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
63  
64  static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
65  				      struct snd_pcm_hw_params_old __user * _oparams);
66  static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
67  				      struct snd_pcm_hw_params_old __user * _oparams);
68  #endif
69  static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
70  
71  /*
72   *
73   */
74  
75  static DECLARE_RWSEM(snd_pcm_link_rwsem);
76  
77  void snd_pcm_group_init(struct snd_pcm_group *group)
78  {
79  	spin_lock_init(&group->lock);
80  	mutex_init(&group->mutex);
81  	INIT_LIST_HEAD(&group->substreams);
82  	refcount_set(&group->refs, 1);
83  }
84  
85  /* define group lock helpers */
86  #define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
87  static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
88  { \
89  	if (nonatomic) \
90  		mutex_ ## mutex_action(&group->mutex); \
91  	else \
92  		spin_ ## action(&group->lock); \
93  }
94  
95  DEFINE_PCM_GROUP_LOCK(lock, lock);
96  DEFINE_PCM_GROUP_LOCK(unlock, unlock);
97  DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
98  DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
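/*
 * For illustration only (a sketch, not part of the build): the first
 * instantiation above expands roughly to the helper below, which picks
 * the mutex in nonatomic mode and the spinlock otherwise:
 *
 *   static void snd_pcm_group_lock(struct snd_pcm_group *group, bool nonatomic)
 *   {
 *           if (nonatomic)
 *                   mutex_lock(&group->mutex);
 *           else
 *                   spin_lock(&group->lock);
 *   }
 */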
99  
100  /**
101   * snd_pcm_stream_lock - Lock the PCM stream
102   * @substream: PCM substream
103   *
104   * This locks the PCM stream's spinlock or mutex depending on the nonatomic
105   * flag of the given substream.  This also takes the global link rw lock
106   * (or rw sem) to avoid races with linked streams.
107   */
108  void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
109  {
110  	snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
111  }
112  EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
113  
114  /**
115   * snd_pcm_stream_unlock - Unlock the PCM stream
116   * @substream: PCM substream
117   *
118   * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
119   */
120  void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
121  {
122  	snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
123  }
124  EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
125  
126  /**
127   * snd_pcm_stream_lock_irq - Lock the PCM stream
128   * @substream: PCM substream
129   *
130   * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
131   * IRQ (only when nonatomic is false).  In the nonatomic case, this is
132   * identical to snd_pcm_stream_lock().
133   */
134  void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
135  {
136  	snd_pcm_group_lock_irq(&substream->self_group,
137  			       substream->pcm->nonatomic);
138  }
139  EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
140  
141  static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
142  {
143  	struct snd_pcm_group *group = &substream->self_group;
144  
145  	if (substream->pcm->nonatomic)
146  		mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
147  	else
148  		spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
149  }
150  
151  /**
152   * snd_pcm_stream_unlock_irq - Unlock the PCM stream
153   * @substream: PCM substream
154   *
155   * This is a counter-part of snd_pcm_stream_lock_irq().
156   */
157  void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
158  {
159  	snd_pcm_group_unlock_irq(&substream->self_group,
160  				 substream->pcm->nonatomic);
161  }
162  EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
163  
164  unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
165  {
166  	unsigned long flags = 0;
167  	if (substream->pcm->nonatomic)
168  		mutex_lock(&substream->self_group.mutex);
169  	else
170  		spin_lock_irqsave(&substream->self_group.lock, flags);
171  	return flags;
172  }
173  EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
174  
175  unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream)
176  {
177  	unsigned long flags = 0;
178  	if (substream->pcm->nonatomic)
179  		mutex_lock_nested(&substream->self_group.mutex,
180  				  SINGLE_DEPTH_NESTING);
181  	else
182  		spin_lock_irqsave_nested(&substream->self_group.lock, flags,
183  					 SINGLE_DEPTH_NESTING);
184  	return flags;
185  }
186  EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave_nested);
187  
188  /**
189   * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
190   * @substream: PCM substream
191   * @flags: irq flags
192   *
193   * This is a counter-part of snd_pcm_stream_lock_irqsave().
194   */
195  void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
196  				      unsigned long flags)
197  {
198  	if (substream->pcm->nonatomic)
199  		mutex_unlock(&substream->self_group.mutex);
200  	else
201  		spin_unlock_irqrestore(&substream->self_group.lock, flags);
202  }
203  EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
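/*
 * Usage sketch (assumes the snd_pcm_stream_lock_irqsave() wrapper macro in
 * <sound/pcm.h>, which calls _snd_pcm_stream_lock_irqsave() above): a driver
 * typically brackets runtime updates like this, pairing it with
 * snd_pcm_stream_unlock_irqrestore():
 *
 *   unsigned long flags;
 *
 *   snd_pcm_stream_lock_irqsave(substream, flags);
 *   ...touch substream->runtime under the stream lock...
 *   snd_pcm_stream_unlock_irqrestore(substream, flags);
 */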
204  
205  /* Run PCM ioctl ops */
206  static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
207  			     unsigned cmd, void *arg)
208  {
209  	if (substream->ops->ioctl)
210  		return substream->ops->ioctl(substream, cmd, arg);
211  	else
212  		return snd_pcm_lib_ioctl(substream, cmd, arg);
213  }
214  
215  int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
216  {
217  	struct snd_pcm *pcm = substream->pcm;
218  	struct snd_pcm_str *pstr = substream->pstr;
219  
220  	memset(info, 0, sizeof(*info));
221  	info->card = pcm->card->number;
222  	info->device = pcm->device;
223  	info->stream = substream->stream;
224  	info->subdevice = substream->number;
225  	strscpy(info->id, pcm->id, sizeof(info->id));
226  	strscpy(info->name, pcm->name, sizeof(info->name));
227  	info->dev_class = pcm->dev_class;
228  	info->dev_subclass = pcm->dev_subclass;
229  	info->subdevices_count = pstr->substream_count;
230  	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
231  	strscpy(info->subname, substream->name, sizeof(info->subname));
232  
233  	return 0;
234  }
235  
236  int snd_pcm_info_user(struct snd_pcm_substream *substream,
237  		      struct snd_pcm_info __user * _info)
238  {
239  	struct snd_pcm_info *info __free(kfree) = NULL;
240  	int err;
241  
242  	info = kmalloc(sizeof(*info), GFP_KERNEL);
243  	if (! info)
244  		return -ENOMEM;
245  	err = snd_pcm_info(substream, info);
246  	if (err >= 0) {
247  		if (copy_to_user(_info, info, sizeof(*info)))
248  			err = -EFAULT;
249  	}
250  	return err;
251  }
252  
253  /* macro for simplified cast */
254  #define PARAM_MASK_BIT(b)	(1U << (__force int)(b))
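/*
 * Example: PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_FORMAT) yields the single bit
 * that represents the FORMAT parameter in params->rmask (parameters the
 * caller asked to refine) and in params->cmask (parameters actually changed).
 */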
255  
256  static bool hw_support_mmap(struct snd_pcm_substream *substream)
257  {
258  	struct snd_dma_buffer *dmabuf;
259  
260  	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
261  		return false;
262  
263  	if (substream->ops->mmap || substream->ops->page)
264  		return true;
265  
266  	dmabuf = snd_pcm_get_dma_buf(substream);
267  	if (!dmabuf)
268  		dmabuf = &substream->dma_buffer;
269  	switch (dmabuf->dev.type) {
270  	case SNDRV_DMA_TYPE_UNKNOWN:
271  		/* we can't know the device, so just assume that the driver does
272  		 * everything right
273  		 */
274  		return true;
275  	case SNDRV_DMA_TYPE_CONTINUOUS:
276  	case SNDRV_DMA_TYPE_VMALLOC:
277  		return true;
278  	default:
279  		return dma_can_mmap(dmabuf->dev.dev);
280  	}
281  }
282  
283  static int constrain_mask_params(struct snd_pcm_substream *substream,
284  				 struct snd_pcm_hw_params *params)
285  {
286  	struct snd_pcm_hw_constraints *constrs =
287  					&substream->runtime->hw_constraints;
288  	struct snd_mask *m;
289  	unsigned int k;
290  	struct snd_mask old_mask __maybe_unused;
291  	int changed;
292  
293  	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
294  		m = hw_param_mask(params, k);
295  		if (snd_mask_empty(m))
296  			return -EINVAL;
297  
298  		/* The caller did not request a change to this parameter. */
299  		if (!(params->rmask & PARAM_MASK_BIT(k)))
300  			continue;
301  
302  		if (trace_hw_mask_param_enabled())
303  			old_mask = *m;
304  
305  		changed = snd_mask_refine(m, constrs_mask(constrs, k));
306  		if (changed < 0)
307  			return changed;
308  		if (changed == 0)
309  			continue;
310  
311  		/* Set corresponding flag so that the caller gets it. */
312  		trace_hw_mask_param(substream, k, 0, &old_mask, m);
313  		params->cmask |= PARAM_MASK_BIT(k);
314  	}
315  
316  	return 0;
317  }
318  
319  static int constrain_interval_params(struct snd_pcm_substream *substream,
320  				     struct snd_pcm_hw_params *params)
321  {
322  	struct snd_pcm_hw_constraints *constrs =
323  					&substream->runtime->hw_constraints;
324  	struct snd_interval *i;
325  	unsigned int k;
326  	struct snd_interval old_interval __maybe_unused;
327  	int changed;
328  
329  	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
330  		i = hw_param_interval(params, k);
331  		if (snd_interval_empty(i))
332  			return -EINVAL;
333  
334  		/* The caller did not request a change to this parameter. */
335  		if (!(params->rmask & PARAM_MASK_BIT(k)))
336  			continue;
337  
338  		if (trace_hw_interval_param_enabled())
339  			old_interval = *i;
340  
341  		changed = snd_interval_refine(i, constrs_interval(constrs, k));
342  		if (changed < 0)
343  			return changed;
344  		if (changed == 0)
345  			continue;
346  
347  		/* Set corresponding flag so that the caller gets it. */
348  		trace_hw_interval_param(substream, k, 0, &old_interval, i);
349  		params->cmask |= PARAM_MASK_BIT(k);
350  	}
351  
352  	return 0;
353  }
354  
355  static int constrain_params_by_rules(struct snd_pcm_substream *substream,
356  				     struct snd_pcm_hw_params *params)
357  {
358  	struct snd_pcm_hw_constraints *constrs =
359  					&substream->runtime->hw_constraints;
360  	unsigned int k;
361  	unsigned int *rstamps __free(kfree) = NULL;
362  	unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
363  	unsigned int stamp;
364  	struct snd_pcm_hw_rule *r;
365  	unsigned int d;
366  	struct snd_mask old_mask __maybe_unused;
367  	struct snd_interval old_interval __maybe_unused;
368  	bool again;
369  	int changed, err = 0;
370  
371  	/*
372  	 * Each application of a rule has its own sequence number.
373  	 *
374  	 * Each member of the 'rstamps' array holds the sequence number of
375  	 * the most recent application of the corresponding rule.
376  	 */
377  	rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
378  	if (!rstamps)
379  		return -ENOMEM;
380  
381  	/*
382  	 * Each member of the 'vstamps' array holds the sequence number of
383  	 * the most recent rule application in which the corresponding
384  	 * parameter was changed.
385  	 *
386  	 * Initially, the elements corresponding to parameters requested by
387  	 * the caller are set to 1; the members for unrequested parameters
388  	 * stay 0 so that those parameters are never changed.
389  	 */
390  	for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
391  		vstamps[k] = (params->rmask & PARAM_MASK_BIT(k)) ? 1 : 0;
392  
393  	/* Due to the above design, actual sequence number starts at 2. */
394  	stamp = 2;
395  retry:
396  	/* Apply all rules in order. */
397  	again = false;
398  	for (k = 0; k < constrs->rules_num; k++) {
399  		r = &constrs->rules[k];
400  
401  		/*
402  		 * Check the condition bits of this rule. When the rule has
403  		 * condition bits, it is applied only if the same bits are
404  		 * set in params->flags. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
405  		 * is an example of such a condition bit.
406  		 */
407  		if (r->cond && !(r->cond & params->flags))
408  			continue;
409  
410  		/*
411  		 * The 'deps' array holds at most four dependencies on
412  		 * SNDRV_PCM_HW_PARAM_XXX parameters for this rule. The fifth
413  		 * member of the array is a sentinel and must be a negative
414  		 * value.
415  		 *
416  		 * The rule is (re)applied in this pass only if a dependent
417  		 * parameter was changed by an earlier application of another
418  		 * rule.
419  		 */
420  		for (d = 0; r->deps[d] >= 0; d++) {
421  			if (vstamps[r->deps[d]] > rstamps[k])
422  				break;
423  		}
424  		if (r->deps[d] < 0)
425  			continue;
426  
427  		if (trace_hw_mask_param_enabled()) {
428  			if (hw_is_mask(r->var))
429  				old_mask = *hw_param_mask(params, r->var);
430  		}
431  		if (trace_hw_interval_param_enabled()) {
432  			if (hw_is_interval(r->var))
433  				old_interval = *hw_param_interval(params, r->var);
434  		}
435  
436  		changed = r->func(params, r);
437  		if (changed < 0)
438  			return changed;
439  
440  		/*
441  		 * When a parameter is changed, report it to the caller via
442  		 * the corresponding bit in cmask, then prepare for the next
443  		 * iteration.
444  		 */
445  		if (changed && r->var >= 0) {
446  			if (hw_is_mask(r->var)) {
447  				trace_hw_mask_param(substream, r->var,
448  					k + 1, &old_mask,
449  					hw_param_mask(params, r->var));
450  			}
451  			if (hw_is_interval(r->var)) {
452  				trace_hw_interval_param(substream, r->var,
453  					k + 1, &old_interval,
454  					hw_param_interval(params, r->var));
455  			}
456  
457  			params->cmask |= PARAM_MASK_BIT(r->var);
458  			vstamps[r->var] = stamp;
459  			again = true;
460  		}
461  
462  		rstamps[k] = stamp++;
463  	}
464  
465  	/* Iterate to evaluate all rules till no parameters are changed. */
466  	if (again)
467  		goto retry;
468  
469  	return err;
470  }
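/*
 * Illustration of the stamp bookkeeping above (hypothetical values): if only
 * RATE is set in rmask, then vstamps[RATE] = 1 and the rest are 0.  A rule
 * depending on RATE sees vstamps[RATE] > rstamps[k], runs, and if it narrows
 * e.g. PERIOD_SIZE it records vstamps[PERIOD_SIZE] = stamp.  The next pass
 * then re-runs the rules depending on PERIOD_SIZE, and so on, until a full
 * pass completes without changing any parameter.
 */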
471  
472  static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
473  				     struct snd_pcm_hw_params *params)
474  {
475  	const struct snd_interval *i;
476  	const struct snd_mask *m;
477  	struct snd_mask *m_rw;
478  	int err;
479  
480  	if (!params->msbits) {
481  		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
482  		if (snd_interval_single(i))
483  			params->msbits = snd_interval_value(i);
484  		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
485  		if (snd_mask_single(m)) {
486  			snd_pcm_format_t format = (__force snd_pcm_format_t)snd_mask_min(m);
487  			params->msbits = snd_pcm_format_width(format);
488  		}
489  	}
490  
491  	if (params->msbits) {
492  		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
493  		if (snd_mask_single(m)) {
494  			snd_pcm_format_t format = (__force snd_pcm_format_t)snd_mask_min(m);
495  
496  			if (snd_pcm_format_linear(format) &&
497  			    snd_pcm_format_width(format) != params->msbits) {
498  				m_rw = hw_param_mask(params, SNDRV_PCM_HW_PARAM_SUBFORMAT);
499  				snd_mask_reset(m_rw,
500  					       (__force unsigned)SNDRV_PCM_SUBFORMAT_MSBITS_MAX);
501  				if (snd_mask_empty(m_rw))
502  					return -EINVAL;
503  			}
504  		}
505  	}
506  
507  	if (!params->rate_den) {
508  		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
509  		if (snd_interval_single(i)) {
510  			params->rate_num = snd_interval_value(i);
511  			params->rate_den = 1;
512  		}
513  	}
514  
515  	if (!params->fifo_size) {
516  		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
517  		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
518  		if (snd_mask_single(m) && snd_interval_single(i)) {
519  			err = snd_pcm_ops_ioctl(substream,
520  						SNDRV_PCM_IOCTL1_FIFO_SIZE,
521  						params);
522  			if (err < 0)
523  				return err;
524  		}
525  	}
526  
527  	if (!params->info) {
528  		params->info = substream->runtime->hw.info;
529  		params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
530  				  SNDRV_PCM_INFO_DRAIN_TRIGGER);
531  		if (!hw_support_mmap(substream))
532  			params->info &= ~(SNDRV_PCM_INFO_MMAP |
533  					  SNDRV_PCM_INFO_MMAP_VALID);
534  	}
535  
536  	err = snd_pcm_ops_ioctl(substream,
537  				SNDRV_PCM_IOCTL1_SYNC_ID,
538  				params);
539  	if (err < 0)
540  		return err;
541  
542  	return 0;
543  }
544  
545  int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
546  		      struct snd_pcm_hw_params *params)
547  {
548  	int err;
549  
550  	params->info = 0;
551  	params->fifo_size = 0;
552  	if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
553  		params->msbits = 0;
554  	if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE)) {
555  		params->rate_num = 0;
556  		params->rate_den = 0;
557  	}
558  
559  	err = constrain_mask_params(substream, params);
560  	if (err < 0)
561  		return err;
562  
563  	err = constrain_interval_params(substream, params);
564  	if (err < 0)
565  		return err;
566  
567  	err = constrain_params_by_rules(substream, params);
568  	if (err < 0)
569  		return err;
570  
571  	params->rmask = 0;
572  
573  	return 0;
574  }
575  EXPORT_SYMBOL(snd_pcm_hw_refine);
576  
577  static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
578  				  struct snd_pcm_hw_params __user * _params)
579  {
580  	struct snd_pcm_hw_params *params __free(kfree) = NULL;
581  	int err;
582  
583  	params = memdup_user(_params, sizeof(*params));
584  	if (IS_ERR(params))
585  		return PTR_ERR(params);
586  
587  	err = snd_pcm_hw_refine(substream, params);
588  	if (err < 0)
589  		return err;
590  
591  	err = fixup_unreferenced_params(substream, params);
592  	if (err < 0)
593  		return err;
594  
595  	if (copy_to_user(_params, params, sizeof(*params)))
596  		return -EFAULT;
597  	return 0;
598  }
599  
600  static int period_to_usecs(struct snd_pcm_runtime *runtime)
601  {
602  	int usecs;
603  
604  	if (! runtime->rate)
605  		return -1; /* invalid */
606  
607  	/* take 75% of period time as the deadline */
608  	usecs = (750000 / runtime->rate) * runtime->period_size;
609  	usecs += ((750000 % runtime->rate) * runtime->period_size) /
610  		runtime->rate;
611  
612  	return usecs;
613  }
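/*
 * Worked example: with rate = 48000 Hz and period_size = 1024 frames the
 * period time is ~21.3 ms and the function returns 75% of it:
 *   (750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 *     = 15 * 1024 + (30000 * 1024) / 48000 = 15360 + 640 = 16000 usecs.
 */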
614  
615  static void snd_pcm_set_state(struct snd_pcm_substream *substream,
616  			      snd_pcm_state_t state)
617  {
618  	guard(pcm_stream_lock_irq)(substream);
619  	if (substream->runtime->state != SNDRV_PCM_STATE_DISCONNECTED)
620  		__snd_pcm_set_state(substream->runtime, state);
621  }
622  
623  static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
624  					int event)
625  {
626  #ifdef CONFIG_SND_PCM_TIMER
627  	if (substream->timer)
628  		snd_timer_notify(substream->timer, event,
629  					&substream->runtime->trigger_tstamp);
630  #endif
631  }
632  
633  void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq)
634  {
635  	if (substream->runtime && substream->runtime->stop_operating) {
636  		substream->runtime->stop_operating = false;
637  		if (substream->ops && substream->ops->sync_stop)
638  			substream->ops->sync_stop(substream);
639  		else if (sync_irq && substream->pcm->card->sync_irq > 0)
640  			synchronize_irq(substream->pcm->card->sync_irq);
641  	}
642  }
643  
644  /**
645   * snd_pcm_hw_params_choose - choose a configuration defined by @params
646   * @pcm: PCM instance
647   * @params: the hw_params instance
648   *
649   * Choose one configuration from configuration space defined by @params.
650   * The configuration chosen is that obtained fixing in this order:
651   * first access, first format, first subformat, min channels,
652   * min rate, min period time, max buffer size, min tick time
653   *
654   * Return: Zero if successful, or a negative error code on failure.
655   */
656  static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
657  				    struct snd_pcm_hw_params *params)
658  {
659  	static const int vars[] = {
660  		SNDRV_PCM_HW_PARAM_ACCESS,
661  		SNDRV_PCM_HW_PARAM_FORMAT,
662  		SNDRV_PCM_HW_PARAM_SUBFORMAT,
663  		SNDRV_PCM_HW_PARAM_CHANNELS,
664  		SNDRV_PCM_HW_PARAM_RATE,
665  		SNDRV_PCM_HW_PARAM_PERIOD_TIME,
666  		SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
667  		SNDRV_PCM_HW_PARAM_TICK_TIME,
668  		-1
669  	};
670  	const int *v;
671  	struct snd_mask old_mask __maybe_unused;
672  	struct snd_interval old_interval __maybe_unused;
673  	int changed;
674  
675  	for (v = vars; *v != -1; v++) {
676  		/* Keep old parameter to trace. */
677  		if (trace_hw_mask_param_enabled()) {
678  			if (hw_is_mask(*v))
679  				old_mask = *hw_param_mask(params, *v);
680  		}
681  		if (trace_hw_interval_param_enabled()) {
682  			if (hw_is_interval(*v))
683  				old_interval = *hw_param_interval(params, *v);
684  		}
685  		if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
686  			changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
687  		else
688  			changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
689  		if (changed < 0)
690  			return changed;
691  		if (changed == 0)
692  			continue;
693  
694  		/* Trace the changed parameter. */
695  		if (hw_is_mask(*v)) {
696  			trace_hw_mask_param(pcm, *v, 0, &old_mask,
697  					    hw_param_mask(params, *v));
698  		}
699  		if (hw_is_interval(*v)) {
700  			trace_hw_interval_param(pcm, *v, 0, &old_interval,
701  						hw_param_interval(params, *v));
702  		}
703  	}
704  
705  	return 0;
706  }
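/*
 * Example: if refinement leaves the RATE interval as [44100, 48000], the
 * loop above fixes it to 44100, since snd_pcm_hw_param_first() picks the
 * minimum; BUFFER_SIZE is the one parameter fixed to its maximum via
 * snd_pcm_hw_param_last().
 */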
707  
708  /* acquire buffer_mutex; if a read/write operation is in progress, return
709   * -EBUSY, otherwise block further r/w operations
710   */
711  static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
712  {
713  	if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
714  		return -EBUSY;
715  	mutex_lock(&runtime->buffer_mutex);
716  	return 0; /* keep buffer_mutex, unlocked by below */
717  }
718  
719  /* release buffer_mutex and clear r/w access flag */
720  static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
721  {
722  	mutex_unlock(&runtime->buffer_mutex);
723  	atomic_inc(&runtime->buffer_accessing);
724  }
725  
726  #if IS_ENABLED(CONFIG_SND_PCM_OSS)
727  #define is_oss_stream(substream)	((substream)->oss.oss)
728  #else
729  #define is_oss_stream(substream)	false
730  #endif
731  
732  static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
733  			     struct snd_pcm_hw_params *params)
734  {
735  	struct snd_pcm_runtime *runtime;
736  	int err, usecs;
737  	unsigned int bits;
738  	snd_pcm_uframes_t frames;
739  
740  	if (PCM_RUNTIME_CHECK(substream))
741  		return -ENXIO;
742  	runtime = substream->runtime;
743  	err = snd_pcm_buffer_access_lock(runtime);
744  	if (err < 0)
745  		return err;
746  	scoped_guard(pcm_stream_lock_irq, substream) {
747  		switch (runtime->state) {
748  		case SNDRV_PCM_STATE_OPEN:
749  		case SNDRV_PCM_STATE_SETUP:
750  		case SNDRV_PCM_STATE_PREPARED:
751  			if (!is_oss_stream(substream) &&
752  			    atomic_read(&substream->mmap_count))
753  				err = -EBADFD;
754  			break;
755  		default:
756  			err = -EBADFD;
757  			break;
758  		}
759  	}
760  	if (err)
761  		goto unlock;
762  
763  	snd_pcm_sync_stop(substream, true);
764  
765  	params->rmask = ~0U;
766  	err = snd_pcm_hw_refine(substream, params);
767  	if (err < 0)
768  		goto _error;
769  
770  	err = snd_pcm_hw_params_choose(substream, params);
771  	if (err < 0)
772  		goto _error;
773  
774  	err = fixup_unreferenced_params(substream, params);
775  	if (err < 0)
776  		goto _error;
777  
778  	if (substream->managed_buffer_alloc) {
779  		err = snd_pcm_lib_malloc_pages(substream,
780  					       params_buffer_bytes(params));
781  		if (err < 0)
782  			goto _error;
783  		runtime->buffer_changed = err > 0;
784  	}
785  
786  	if (substream->ops->hw_params != NULL) {
787  		err = substream->ops->hw_params(substream, params);
788  		if (err < 0)
789  			goto _error;
790  	}
791  
792  	runtime->access = params_access(params);
793  	runtime->format = params_format(params);
794  	runtime->subformat = params_subformat(params);
795  	runtime->channels = params_channels(params);
796  	runtime->rate = params_rate(params);
797  	runtime->period_size = params_period_size(params);
798  	runtime->periods = params_periods(params);
799  	runtime->buffer_size = params_buffer_size(params);
800  	runtime->info = params->info;
801  	runtime->rate_num = params->rate_num;
802  	runtime->rate_den = params->rate_den;
803  	runtime->no_period_wakeup =
804  			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
805  			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
806  
807  	bits = snd_pcm_format_physical_width(runtime->format);
808  	runtime->sample_bits = bits;
809  	bits *= runtime->channels;
810  	runtime->frame_bits = bits;
811  	frames = 1;
812  	while (bits % 8 != 0) {
813  		bits *= 2;
814  		frames *= 2;
815  	}
816  	runtime->byte_align = bits / 8;
817  	runtime->min_align = frames;
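	/*
	 * Example: stereo S16_LE gives sample_bits = 16, frame_bits = 32,
	 * byte_align = 4 and min_align = 1 frame.  For a sub-byte format
	 * (e.g. a hypothetical 4-bit mono stream), frame_bits = 4, so the
	 * loop above doubles it to 8: byte_align = 1 and min_align = 2
	 * frames, the smallest whole number of frames that is byte-aligned.
	 */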
818  
819  	/* Default sw params */
820  	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
821  	runtime->period_step = 1;
822  	runtime->control->avail_min = runtime->period_size;
823  	runtime->start_threshold = 1;
824  	runtime->stop_threshold = runtime->buffer_size;
825  	runtime->silence_threshold = 0;
826  	runtime->silence_size = 0;
827  	runtime->boundary = runtime->buffer_size;
828  	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
829  		runtime->boundary *= 2;
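	/*
	 * Example: with buffer_size = 1024 frames, boundary becomes 1024
	 * scaled by the largest power of two such that one more doubling
	 * would exceed LONG_MAX - buffer_size; appl_ptr and hw_ptr wrap at
	 * this value, far beyond the real buffer size.
	 */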
830  
831  	/* clear the buffer to avoid possible kernel info leaks */
832  	if (runtime->dma_area && !substream->ops->copy) {
833  		size_t size = runtime->dma_bytes;
834  
835  		if (runtime->info & SNDRV_PCM_INFO_MMAP)
836  			size = PAGE_ALIGN(size);
837  		memset(runtime->dma_area, 0, size);
838  	}
839  
840  	snd_pcm_timer_resolution_change(substream);
841  	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
842  
843  	if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
844  		cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
845  	usecs = period_to_usecs(runtime);
846  	if (usecs >= 0)
847  		cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
848  					    usecs);
849  	err = 0;
850   _error:
851  	if (err) {
852  		/* the hardware might be unusable from this point on,
853  		 * so force the application to retry setting
854  		 * the correct hardware parameters
855  		 */
856  		snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
857  		if (substream->ops->hw_free != NULL)
858  			substream->ops->hw_free(substream);
859  		if (substream->managed_buffer_alloc)
860  			snd_pcm_lib_free_pages(substream);
861  	}
862   unlock:
863  	snd_pcm_buffer_access_unlock(runtime);
864  	return err;
865  }
866  
867  static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
868  				  struct snd_pcm_hw_params __user * _params)
869  {
870  	struct snd_pcm_hw_params *params __free(kfree) = NULL;
871  	int err;
872  
873  	params = memdup_user(_params, sizeof(*params));
874  	if (IS_ERR(params))
875  		return PTR_ERR(params);
876  
877  	err = snd_pcm_hw_params(substream, params);
878  	if (err < 0)
879  		return err;
880  
881  	if (copy_to_user(_params, params, sizeof(*params)))
882  		return -EFAULT;
883  	return err;
884  }
885  
886  static int do_hw_free(struct snd_pcm_substream *substream)
887  {
888  	int result = 0;
889  
890  	snd_pcm_sync_stop(substream, true);
891  	if (substream->ops->hw_free)
892  		result = substream->ops->hw_free(substream);
893  	if (substream->managed_buffer_alloc)
894  		snd_pcm_lib_free_pages(substream);
895  	return result;
896  }
897  
898  static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
899  {
900  	struct snd_pcm_runtime *runtime;
901  	int result = 0;
902  
903  	if (PCM_RUNTIME_CHECK(substream))
904  		return -ENXIO;
905  	runtime = substream->runtime;
906  	result = snd_pcm_buffer_access_lock(runtime);
907  	if (result < 0)
908  		return result;
909  	scoped_guard(pcm_stream_lock_irq, substream) {
910  		switch (runtime->state) {
911  		case SNDRV_PCM_STATE_SETUP:
912  		case SNDRV_PCM_STATE_PREPARED:
913  			if (atomic_read(&substream->mmap_count))
914  				result = -EBADFD;
915  			break;
916  		default:
917  			result = -EBADFD;
918  			break;
919  		}
920  	}
921  	if (result)
922  		goto unlock;
923  	result = do_hw_free(substream);
924  	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
925  	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
926   unlock:
927  	snd_pcm_buffer_access_unlock(runtime);
928  	return result;
929  }
930  
931  static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
932  			     struct snd_pcm_sw_params *params)
933  {
934  	struct snd_pcm_runtime *runtime;
935  	int err;
936  
937  	if (PCM_RUNTIME_CHECK(substream))
938  		return -ENXIO;
939  	runtime = substream->runtime;
940  	scoped_guard(pcm_stream_lock_irq, substream) {
941  		if (runtime->state == SNDRV_PCM_STATE_OPEN)
942  			return -EBADFD;
943  	}
944  
945  	if (params->tstamp_mode < 0 ||
946  	    params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
947  		return -EINVAL;
948  	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
949  	    params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
950  		return -EINVAL;
951  	if (params->avail_min == 0)
952  		return -EINVAL;
953  	if (params->silence_size >= runtime->boundary) {
954  		if (params->silence_threshold != 0)
955  			return -EINVAL;
956  	} else {
957  		if (params->silence_size > params->silence_threshold)
958  			return -EINVAL;
959  		if (params->silence_threshold > runtime->buffer_size)
960  			return -EINVAL;
961  	}
962  	err = 0;
963  	scoped_guard(pcm_stream_lock_irq, substream) {
964  		runtime->tstamp_mode = params->tstamp_mode;
965  		if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
966  			runtime->tstamp_type = params->tstamp_type;
967  		runtime->period_step = params->period_step;
968  		runtime->control->avail_min = params->avail_min;
969  		runtime->start_threshold = params->start_threshold;
970  		runtime->stop_threshold = params->stop_threshold;
971  		runtime->silence_threshold = params->silence_threshold;
972  		runtime->silence_size = params->silence_size;
973  		params->boundary = runtime->boundary;
974  		if (snd_pcm_running(substream)) {
975  			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
976  			    runtime->silence_size > 0)
977  				snd_pcm_playback_silence(substream, ULONG_MAX);
978  			err = snd_pcm_update_state(substream, runtime);
979  		}
980  	}
981  	return err;
982  }
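/*
 * Illustration (values are up to the application): a typical playback setup
 * issued via SNDRV_PCM_IOCTL_SW_PARAMS might request avail_min = period_size,
 * start_threshold = buffer_size and stop_threshold = buffer_size, so the
 * stream auto-starts once a full buffer has been queued and hits XRUN when
 * it drains completely.
 */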
983  
984  static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
985  				  struct snd_pcm_sw_params __user * _params)
986  {
987  	struct snd_pcm_sw_params params;
988  	int err;
989  	if (copy_from_user(&params, _params, sizeof(params)))
990  		return -EFAULT;
991  	err = snd_pcm_sw_params(substream, &params);
992  	if (copy_to_user(_params, &params, sizeof(params)))
993  		return -EFAULT;
994  	return err;
995  }
996  
997  static inline snd_pcm_uframes_t
998  snd_pcm_calc_delay(struct snd_pcm_substream *substream)
999  {
1000  	snd_pcm_uframes_t delay;
1001  
1002  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
1003  		delay = snd_pcm_playback_hw_avail(substream->runtime);
1004  	else
1005  		delay = snd_pcm_capture_avail(substream->runtime);
1006  	return delay + substream->runtime->delay;
1007  }
1008  
1009  int snd_pcm_status64(struct snd_pcm_substream *substream,
1010  		     struct snd_pcm_status64 *status)
1011  {
1012  	struct snd_pcm_runtime *runtime = substream->runtime;
1013  
1014  	guard(pcm_stream_lock_irq)(substream);
1015  
1016  	snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
1017  					&runtime->audio_tstamp_config);
1018  
1019  	/* backwards compatible behavior */
1020  	if (runtime->audio_tstamp_config.type_requested ==
1021  		SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
1022  		if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
1023  			runtime->audio_tstamp_config.type_requested =
1024  				SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
1025  		else
1026  			runtime->audio_tstamp_config.type_requested =
1027  				SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
1028  		runtime->audio_tstamp_report.valid = 0;
1029  	} else
1030  		runtime->audio_tstamp_report.valid = 1;
1031  
1032  	status->state = runtime->state;
1033  	status->suspended_state = runtime->suspended_state;
1034  	if (status->state == SNDRV_PCM_STATE_OPEN)
1035  		return 0;
1036  	status->trigger_tstamp_sec = runtime->trigger_tstamp.tv_sec;
1037  	status->trigger_tstamp_nsec = runtime->trigger_tstamp.tv_nsec;
1038  	if (snd_pcm_running(substream)) {
1039  		snd_pcm_update_hw_ptr(substream);
1040  		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
1041  			status->tstamp_sec = runtime->status->tstamp.tv_sec;
1042  			status->tstamp_nsec =
1043  				runtime->status->tstamp.tv_nsec;
1044  			status->driver_tstamp_sec =
1045  				runtime->driver_tstamp.tv_sec;
1046  			status->driver_tstamp_nsec =
1047  				runtime->driver_tstamp.tv_nsec;
1048  			status->audio_tstamp_sec =
1049  				runtime->status->audio_tstamp.tv_sec;
1050  			status->audio_tstamp_nsec =
1051  				runtime->status->audio_tstamp.tv_nsec;
1052  			if (runtime->audio_tstamp_report.valid == 1)
1053  				/* backwards compatibility, no report provided in COMPAT mode */
1054  				snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
1055  								&status->audio_tstamp_accuracy,
1056  								&runtime->audio_tstamp_report);
1057  
1058  			goto _tstamp_end;
1059  		}
1060  	} else {
1061  		/* get tstamp only in fallback mode and only if enabled */
1062  		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
1063  			struct timespec64 tstamp;
1064  
1065  			snd_pcm_gettime(runtime, &tstamp);
1066  			status->tstamp_sec = tstamp.tv_sec;
1067  			status->tstamp_nsec = tstamp.tv_nsec;
1068  		}
1069  	}
1070   _tstamp_end:
1071  	status->appl_ptr = runtime->control->appl_ptr;
1072  	status->hw_ptr = runtime->status->hw_ptr;
1073  	status->avail = snd_pcm_avail(substream);
1074  	status->delay = snd_pcm_running(substream) ?
1075  		snd_pcm_calc_delay(substream) : 0;
1076  	status->avail_max = runtime->avail_max;
1077  	status->overrange = runtime->overrange;
1078  	runtime->avail_max = 0;
1079  	runtime->overrange = 0;
1080  	return 0;
1081  }
1082  
1083  static int snd_pcm_status_user64(struct snd_pcm_substream *substream,
1084  				 struct snd_pcm_status64 __user * _status,
1085  				 bool ext)
1086  {
1087  	struct snd_pcm_status64 status;
1088  	int res;
1089  
1090  	memset(&status, 0, sizeof(status));
1091  	/*
1092  	 * with extension, parameters are read/write,
1093  	 * get audio_tstamp_data from user,
1094  	 * ignore rest of status structure
1095  	 */
1096  	if (ext && get_user(status.audio_tstamp_data,
1097  				(u32 __user *)(&_status->audio_tstamp_data)))
1098  		return -EFAULT;
1099  	res = snd_pcm_status64(substream, &status);
1100  	if (res < 0)
1101  		return res;
1102  	if (copy_to_user(_status, &status, sizeof(status)))
1103  		return -EFAULT;
1104  	return 0;
1105  }
1106  
1107  static int snd_pcm_status_user32(struct snd_pcm_substream *substream,
1108  				 struct snd_pcm_status32 __user * _status,
1109  				 bool ext)
1110  {
1111  	struct snd_pcm_status64 status64;
1112  	struct snd_pcm_status32 status32;
1113  	int res;
1114  
1115  	memset(&status64, 0, sizeof(status64));
1116  	memset(&status32, 0, sizeof(status32));
1117  	/*
1118  	 * with extension, parameters are read/write,
1119  	 * get audio_tstamp_data from user,
1120  	 * ignore rest of status structure
1121  	 */
1122  	if (ext && get_user(status64.audio_tstamp_data,
1123  			    (u32 __user *)(&_status->audio_tstamp_data)))
1124  		return -EFAULT;
1125  	res = snd_pcm_status64(substream, &status64);
1126  	if (res < 0)
1127  		return res;
1128  
1129  	status32 = (struct snd_pcm_status32) {
1130  		.state = status64.state,
1131  		.trigger_tstamp_sec = status64.trigger_tstamp_sec,
1132  		.trigger_tstamp_nsec = status64.trigger_tstamp_nsec,
1133  		.tstamp_sec = status64.tstamp_sec,
1134  		.tstamp_nsec = status64.tstamp_nsec,
1135  		.appl_ptr = status64.appl_ptr,
1136  		.hw_ptr = status64.hw_ptr,
1137  		.delay = status64.delay,
1138  		.avail = status64.avail,
1139  		.avail_max = status64.avail_max,
1140  		.overrange = status64.overrange,
1141  		.suspended_state = status64.suspended_state,
1142  		.audio_tstamp_data = status64.audio_tstamp_data,
1143  		.audio_tstamp_sec = status64.audio_tstamp_sec,
1144  		.audio_tstamp_nsec = status64.audio_tstamp_nsec,
1145  		.driver_tstamp_sec = status64.audio_tstamp_sec,
1146  		.driver_tstamp_nsec = status64.audio_tstamp_nsec,
1147  		.audio_tstamp_accuracy = status64.audio_tstamp_accuracy,
1148  	};
1149  
1150  	if (copy_to_user(_status, &status32, sizeof(status32)))
1151  		return -EFAULT;
1152  
1153  	return 0;
1154  }
1155  
1156  static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
1157  				struct snd_pcm_channel_info * info)
1158  {
1159  	struct snd_pcm_runtime *runtime;
1160  	unsigned int channel;
1161  
1162  	channel = info->channel;
1163  	runtime = substream->runtime;
1164  	scoped_guard(pcm_stream_lock_irq, substream) {
1165  		if (runtime->state == SNDRV_PCM_STATE_OPEN)
1166  			return -EBADFD;
1167  	}
1168  	if (channel >= runtime->channels)
1169  		return -EINVAL;
1170  	memset(info, 0, sizeof(*info));
1171  	info->channel = channel;
1172  	return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
1173  }
1174  
1175  static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
1176  				     struct snd_pcm_channel_info __user * _info)
1177  {
1178  	struct snd_pcm_channel_info info;
1179  	int res;
1180  
1181  	if (copy_from_user(&info, _info, sizeof(info)))
1182  		return -EFAULT;
1183  	res = snd_pcm_channel_info(substream, &info);
1184  	if (res < 0)
1185  		return res;
1186  	if (copy_to_user(_info, &info, sizeof(info)))
1187  		return -EFAULT;
1188  	return 0;
1189  }
1190  
1191  static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
1192  {
1193  	struct snd_pcm_runtime *runtime = substream->runtime;
1194  	if (runtime->trigger_master == NULL)
1195  		return;
1196  	if (runtime->trigger_master == substream) {
1197  		if (!runtime->trigger_tstamp_latched)
1198  			snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1199  	} else {
1200  		snd_pcm_trigger_tstamp(runtime->trigger_master);
1201  		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1202  	}
1203  	runtime->trigger_master = NULL;
1204  }
1205  
1206  #define ACTION_ARG_IGNORE	(__force snd_pcm_state_t)0
1207  
1208  struct action_ops {
1209  	int (*pre_action)(struct snd_pcm_substream *substream,
1210  			  snd_pcm_state_t state);
1211  	int (*do_action)(struct snd_pcm_substream *substream,
1212  			 snd_pcm_state_t state);
1213  	void (*undo_action)(struct snd_pcm_substream *substream,
1214  			    snd_pcm_state_t state);
1215  	void (*post_action)(struct snd_pcm_substream *substream,
1216  			    snd_pcm_state_t state);
1217  };
1218  
1219  /*
1220   *  This function is the core of linked-stream handling.
1221   *  Note: the stream state might be changed even on failure
1222   *  Note2: call with the calling stream's lock + link lock held
1223   */
1224  static int snd_pcm_action_group(const struct action_ops *ops,
1225  				struct snd_pcm_substream *substream,
1226  				snd_pcm_state_t state,
1227  				bool stream_lock)
1228  {
1229  	struct snd_pcm_substream *s = NULL;
1230  	struct snd_pcm_substream *s1;
1231  	int res = 0, depth = 1;
1232  
1233  	snd_pcm_group_for_each_entry(s, substream) {
1234  		if (s != substream) {
1235  			if (!stream_lock)
1236  				mutex_lock_nested(&s->runtime->buffer_mutex, depth);
1237  			else if (s->pcm->nonatomic)
1238  				mutex_lock_nested(&s->self_group.mutex, depth);
1239  			else
1240  				spin_lock_nested(&s->self_group.lock, depth);
1241  			depth++;
1242  		}
1243  		res = ops->pre_action(s, state);
1244  		if (res < 0)
1245  			goto _unlock;
1246  	}
1247  	snd_pcm_group_for_each_entry(s, substream) {
1248  		res = ops->do_action(s, state);
1249  		if (res < 0) {
1250  			if (ops->undo_action) {
1251  				snd_pcm_group_for_each_entry(s1, substream) {
1252  					if (s1 == s) /* failed stream */
1253  						break;
1254  					ops->undo_action(s1, state);
1255  				}
1256  			}
1257  			s = NULL; /* unlock all */
1258  			goto _unlock;
1259  		}
1260  	}
1261  	snd_pcm_group_for_each_entry(s, substream) {
1262  		ops->post_action(s, state);
1263  	}
1264   _unlock:
1265  	/* unlock streams */
1266  	snd_pcm_group_for_each_entry(s1, substream) {
1267  		if (s1 != substream) {
1268  			if (!stream_lock)
1269  				mutex_unlock(&s1->runtime->buffer_mutex);
1270  			else if (s1->pcm->nonatomic)
1271  				mutex_unlock(&s1->self_group.mutex);
1272  			else
1273  				spin_unlock(&s1->self_group.lock);
1274  		}
1275  		if (s1 == s)	/* end */
1276  			break;
1277  	}
1278  	return res;
1279  }
1280  
1281  /*
1282   *  Note: call with stream lock
1283   */
1284  static int snd_pcm_action_single(const struct action_ops *ops,
1285  				 struct snd_pcm_substream *substream,
1286  				 snd_pcm_state_t state)
1287  {
1288  	int res;
1289  
1290  	res = ops->pre_action(substream, state);
1291  	if (res < 0)
1292  		return res;
1293  	res = ops->do_action(substream, state);
1294  	if (res == 0)
1295  		ops->post_action(substream, state);
1296  	else if (ops->undo_action)
1297  		ops->undo_action(substream, state);
1298  	return res;
1299  }
1300  
1301  static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
1302  				 struct snd_pcm_group *new_group)
1303  {
1304  	substream->group = new_group;
1305  	list_move(&substream->link_list, &new_group->substreams);
1306  }
1307  
1308  /*
1309   * Unref and unlock the group, but keep the stream lock;
1310   * when the group becomes empty and no longer referenced, it is freed
1311   */
1312  static void snd_pcm_group_unref(struct snd_pcm_group *group,
1313  				struct snd_pcm_substream *substream)
1314  {
1315  	bool do_free;
1316  
1317  	if (!group)
1318  		return;
1319  	do_free = refcount_dec_and_test(&group->refs);
1320  	snd_pcm_group_unlock(group, substream->pcm->nonatomic);
1321  	if (do_free)
1322  		kfree(group);
1323  }
1324  
1325  /*
1326   * Lock the group inside a stream lock and reference it;
1327   * return the locked group object, or NULL if not linked
1328   */
1329  static struct snd_pcm_group *
1330  snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
1331  {
1332  	bool nonatomic = substream->pcm->nonatomic;
1333  	struct snd_pcm_group *group;
1334  	bool trylock;
1335  
1336  	for (;;) {
1337  		if (!snd_pcm_stream_linked(substream))
1338  			return NULL;
1339  		group = substream->group;
1340  		/* block freeing the group object */
1341  		refcount_inc(&group->refs);
1342  
1343  		trylock = nonatomic ? mutex_trylock(&group->mutex) :
1344  			spin_trylock(&group->lock);
1345  		if (trylock)
1346  			break; /* OK */
1347  
1348  		/* re-lock to avoid an ABBA deadlock */
1349  		snd_pcm_stream_unlock(substream);
1350  		snd_pcm_group_lock(group, nonatomic);
1351  		snd_pcm_stream_lock(substream);
1352  
1353  		/* check the group again; the above opens a small race window */
1354  		if (substream->group == group)
1355  			break; /* OK */
1356  		/* group changed, try again */
1357  		snd_pcm_group_unref(group, substream);
1358  	}
1359  	return group;
1360  }
1361  
1362  /*
1363   *  Note: call with stream lock
1364   */
1365  static int snd_pcm_action(const struct action_ops *ops,
1366  			  struct snd_pcm_substream *substream,
1367  			  snd_pcm_state_t state)
1368  {
1369  	struct snd_pcm_group *group;
1370  	int res;
1371  
1372  	group = snd_pcm_stream_group_ref(substream);
1373  	if (group)
1374  		res = snd_pcm_action_group(ops, substream, state, true);
1375  	else
1376  		res = snd_pcm_action_single(ops, substream, state);
1377  	snd_pcm_group_unref(group, substream);
1378  	return res;
1379  }
1380  
1381  /*
1382   *  Note: don't use any locks before
1383   */
1384  static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1385  				   struct snd_pcm_substream *substream,
1386  				   snd_pcm_state_t state)
1387  {
1388  	guard(pcm_stream_lock_irq)(substream);
1389  	return snd_pcm_action(ops, substream, state);
1390  }
1391  
1392  /*
1393   */
1394  static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1395  				    struct snd_pcm_substream *substream,
1396  				    snd_pcm_state_t state)
1397  {
1398  	int res;
1399  
1400  	/* Guarantee the group members won't change during non-atomic action */
1401  	guard(rwsem_read)(&snd_pcm_link_rwsem);
1402  	res = snd_pcm_buffer_access_lock(substream->runtime);
1403  	if (res < 0)
1404  		return res;
1405  	if (snd_pcm_stream_linked(substream))
1406  		res = snd_pcm_action_group(ops, substream, state, false);
1407  	else
1408  		res = snd_pcm_action_single(ops, substream, state);
1409  	snd_pcm_buffer_access_unlock(substream->runtime);
1410  	return res;
1411  }
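/*
 * Summary of the three dispatchers above (descriptive note): snd_pcm_action()
 * expects the caller to already hold the stream lock,
 * snd_pcm_action_lock_irq() takes the stream lock itself, and
 * snd_pcm_action_nonatomic() serves operations that may sleep (e.g. the
 * prepare/drain paths), serializing via the link rwsem and buffer_mutex
 * instead of the stream lock.
 */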
1412  
1413  /*
1414   * start callbacks
1415   */
1416  static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
1417  			     snd_pcm_state_t state)
1418  {
1419  	struct snd_pcm_runtime *runtime = substream->runtime;
1420  	if (runtime->state != SNDRV_PCM_STATE_PREPARED)
1421  		return -EBADFD;
1422  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1423  	    !snd_pcm_playback_data(substream))
1424  		return -EPIPE;
1425  	runtime->trigger_tstamp_latched = false;
1426  	runtime->trigger_master = substream;
1427  	return 0;
1428  }
1429  
1430  static int snd_pcm_do_start(struct snd_pcm_substream *substream,
1431  			    snd_pcm_state_t state)
1432  {
1433  	int err;
1434  
1435  	if (substream->runtime->trigger_master != substream)
1436  		return 0;
1437  	err = substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1438  	/* XRUN happened during the start */
1439  	if (err == -EPIPE)
1440  		__snd_pcm_set_state(substream->runtime, SNDRV_PCM_STATE_XRUN);
1441  	return err;
1442  }
1443  
1444  static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
1445  			       snd_pcm_state_t state)
1446  {
1447  	if (substream->runtime->trigger_master == substream) {
1448  		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1449  		substream->runtime->stop_operating = true;
1450  	}
1451  }
1452  
1453  static void snd_pcm_post_start(struct snd_pcm_substream *substream,
1454  			       snd_pcm_state_t state)
1455  {
1456  	struct snd_pcm_runtime *runtime = substream->runtime;
1457  	snd_pcm_trigger_tstamp(substream);
1458  	runtime->hw_ptr_jiffies = jiffies;
1459  	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1460  							    runtime->rate;
1461  	__snd_pcm_set_state(runtime, state);
1462  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1463  	    runtime->silence_size > 0)
1464  		snd_pcm_playback_silence(substream, ULONG_MAX);
1465  	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1466  }
1467  
1468  static const struct action_ops snd_pcm_action_start = {
1469  	.pre_action = snd_pcm_pre_start,
1470  	.do_action = snd_pcm_do_start,
1471  	.undo_action = snd_pcm_undo_start,
1472  	.post_action = snd_pcm_post_start
1473  };
1474  
1475  /**
1476   * snd_pcm_start - start all linked streams
1477   * @substream: the PCM substream instance
1478   *
1479   * Return: Zero if successful, or a negative error code.
1480   * The stream lock must be acquired before calling this function.
1481   */
1482  int snd_pcm_start(struct snd_pcm_substream *substream)
1483  {
1484  	return snd_pcm_action(&snd_pcm_action_start, substream,
1485  			      SNDRV_PCM_STATE_RUNNING);
1486  }
1487  
1488  /* take the stream lock and start the streams */
1489  static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1490  {
1491  	return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1492  				       SNDRV_PCM_STATE_RUNNING);
1493  }
1494  
1495  /*
1496   * stop callbacks
1497   */
1498  static int snd_pcm_pre_stop(struct snd_pcm_substream *substream,
1499  			    snd_pcm_state_t state)
1500  {
1501  	struct snd_pcm_runtime *runtime = substream->runtime;
1502  	if (runtime->state == SNDRV_PCM_STATE_OPEN)
1503  		return -EBADFD;
1504  	runtime->trigger_master = substream;
1505  	return 0;
1506  }
1507  
1508  static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
1509  			   snd_pcm_state_t state)
1510  {
1511  	if (substream->runtime->trigger_master == substream &&
1512  	    snd_pcm_running(substream)) {
1513  		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1514  		substream->runtime->stop_operating = true;
1515  	}
1516  	return 0; /* unconditionally stop all substreams */
1517  }
1518  
1519  static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
1520  			      snd_pcm_state_t state)
1521  {
1522  	struct snd_pcm_runtime *runtime = substream->runtime;
1523  	if (runtime->state != state) {
1524  		snd_pcm_trigger_tstamp(substream);
1525  		__snd_pcm_set_state(runtime, state);
1526  		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1527  	}
1528  	wake_up(&runtime->sleep);
1529  	wake_up(&runtime->tsleep);
1530  }
1531  
1532  static const struct action_ops snd_pcm_action_stop = {
1533  	.pre_action = snd_pcm_pre_stop,
1534  	.do_action = snd_pcm_do_stop,
1535  	.post_action = snd_pcm_post_stop
1536  };
1537  
1538  /**
1539   * snd_pcm_stop - try to stop all running streams in the substream group
1540   * @substream: the PCM substream instance
1541   * @state: PCM state after stopping the stream
1542   *
1543   * The state of each stream is then changed to the given state unconditionally.
1544   *
1545   * Return: Zero if successful, or a negative error code.
1546   */
1547  int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1548  {
1549  	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1550  }
1551  EXPORT_SYMBOL(snd_pcm_stop);
1552  
1553  /**
1554   * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1555   * @substream: the PCM substream
1556   *
1557   * After stopping, the state is changed to SETUP.
1558   * Unlike snd_pcm_stop(), this affects only the given stream.
1559   *
1560   * Return: Zero if successful, or a negative error code.
1561   */
1562  int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1563  {
1564  	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1565  				     SNDRV_PCM_STATE_SETUP);
1566  }
1567  
1568  /**
1569   * snd_pcm_stop_xrun - stop the running streams as XRUN
1570   * @substream: the PCM substream instance
1571   *
1572   * This stops the given running substream (and all linked substreams) as XRUN.
1573   * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1574   *
1575   * Return: Zero if successful, or a negative error code.
1576   */
1577  int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1578  {
1579  	guard(pcm_stream_lock_irqsave)(substream);
1580  	if (substream->runtime && snd_pcm_running(substream))
1581  		__snd_pcm_xrun(substream);
1582  	return 0;
1583  }
1584  EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
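
/*
 * Illustrative sketch: in contrast to snd_pcm_stop(), a driver may call
 * snd_pcm_stop_xrun() directly from its interrupt handler without taking the
 * stream lock first, e.g. when a FIFO error is detected (MYCHIP_FIFO_ERROR
 * is a hypothetical status bit):
 *
 *	if (status & MYCHIP_FIFO_ERROR)
 *		snd_pcm_stop_xrun(substream);
 */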
1585  
1586  /*
1587   * pause callbacks: pass boolean (to start pause or resume) as state argument
1588   */
1589  #define pause_pushed(state)	(__force bool)(state)
1590  
1591  static int snd_pcm_pre_pause(struct snd_pcm_substream *substream,
1592  			     snd_pcm_state_t state)
1593  {
1594  	struct snd_pcm_runtime *runtime = substream->runtime;
1595  	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1596  		return -ENOSYS;
1597  	if (pause_pushed(state)) {
1598  		if (runtime->state != SNDRV_PCM_STATE_RUNNING)
1599  			return -EBADFD;
1600  	} else if (runtime->state != SNDRV_PCM_STATE_PAUSED)
1601  		return -EBADFD;
1602  	runtime->trigger_master = substream;
1603  	return 0;
1604  }
1605  
1606  static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
1607  			    snd_pcm_state_t state)
1608  {
1609  	if (substream->runtime->trigger_master != substream)
1610  		return 0;
1611  	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
1612  	 * a delta from the current jiffies; pushing hw_ptr_jiffies far into
1613  	 * the past yields a delta large enough to effectively skip the check once.
1614  	 */
1615  	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1616  	return substream->ops->trigger(substream,
1617  				       pause_pushed(state) ?
1618  				       SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1619  				       SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1620  }
1621  
1622  static void snd_pcm_undo_pause(struct snd_pcm_substream *substream,
1623  			       snd_pcm_state_t state)
1624  {
1625  	if (substream->runtime->trigger_master == substream)
1626  		substream->ops->trigger(substream,
1627  					pause_pushed(state) ?
1628  					SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1629  					SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1630  }
1631  
1632  static void snd_pcm_post_pause(struct snd_pcm_substream *substream,
1633  			       snd_pcm_state_t state)
1634  {
1635  	struct snd_pcm_runtime *runtime = substream->runtime;
1636  	snd_pcm_trigger_tstamp(substream);
1637  	if (pause_pushed(state)) {
1638  		__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_PAUSED);
1639  		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1640  		wake_up(&runtime->sleep);
1641  		wake_up(&runtime->tsleep);
1642  	} else {
1643  		__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_RUNNING);
1644  		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1645  	}
1646  }
1647  
1648  static const struct action_ops snd_pcm_action_pause = {
1649  	.pre_action = snd_pcm_pre_pause,
1650  	.do_action = snd_pcm_do_pause,
1651  	.undo_action = snd_pcm_undo_pause,
1652  	.post_action = snd_pcm_post_pause
1653  };
1654  
1655  /*
1656   * Push/release the pause for all linked streams.
1657   */
1658  static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push)
1659  {
1660  	return snd_pcm_action(&snd_pcm_action_pause, substream,
1661  			      (__force snd_pcm_state_t)push);
1662  }
1663  
1664  static int snd_pcm_pause_lock_irq(struct snd_pcm_substream *substream,
1665  				  bool push)
1666  {
1667  	return snd_pcm_action_lock_irq(&snd_pcm_action_pause, substream,
1668  				       (__force snd_pcm_state_t)push);
1669  }
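
/*
 * Illustrative sketch of the driver side of the pause path: the core above
 * translates a pause request into PAUSE_PUSH/PAUSE_RELEASE trigger commands,
 * which a driver's trigger callback handles alongside START/STOP.  The
 * "mychip_*" helpers below are placeholders:
 *
 *	static int mychip_trigger(struct snd_pcm_substream *substream, int cmd)
 *	{
 *		switch (cmd) {
 *		case SNDRV_PCM_TRIGGER_START:
 *			return mychip_start_dma(substream);
 *		case SNDRV_PCM_TRIGGER_STOP:
 *			return mychip_stop_dma(substream);
 *		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 *			return mychip_halt_dma(substream);	// keep position
 *		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 *			return mychip_restart_dma(substream);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */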
1670  
1671  #ifdef CONFIG_PM
1672  /* suspend callback: state argument ignored */
1673  
1674  static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream,
1675  			       snd_pcm_state_t state)
1676  {
1677  	struct snd_pcm_runtime *runtime = substream->runtime;
1678  	switch (runtime->state) {
1679  	case SNDRV_PCM_STATE_SUSPENDED:
1680  		return -EBUSY;
1681  	/* unresumable PCM state; return -EBUSY for skipping suspend */
1682  	case SNDRV_PCM_STATE_OPEN:
1683  	case SNDRV_PCM_STATE_SETUP:
1684  	case SNDRV_PCM_STATE_DISCONNECTED:
1685  		return -EBUSY;
1686  	}
1687  	runtime->trigger_master = substream;
1688  	return 0;
1689  }
1690  
1691  static int snd_pcm_do_suspend(struct snd_pcm_substream *substream,
1692  			      snd_pcm_state_t state)
1693  {
1694  	struct snd_pcm_runtime *runtime = substream->runtime;
1695  	if (runtime->trigger_master != substream)
1696  		return 0;
1697  	if (! snd_pcm_running(substream))
1698  		return 0;
1699  	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1700  	runtime->stop_operating = true;
1701  	return 0; /* suspend unconditionally */
1702  }
1703  
1704  static void snd_pcm_post_suspend(struct snd_pcm_substream *substream,
1705  				 snd_pcm_state_t state)
1706  {
1707  	struct snd_pcm_runtime *runtime = substream->runtime;
1708  	snd_pcm_trigger_tstamp(substream);
1709  	runtime->suspended_state = runtime->state;
1710  	runtime->status->suspended_state = runtime->suspended_state;
1711  	__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_SUSPENDED);
1712  	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1713  	wake_up(&runtime->sleep);
1714  	wake_up(&runtime->tsleep);
1715  }
1716  
1717  static const struct action_ops snd_pcm_action_suspend = {
1718  	.pre_action = snd_pcm_pre_suspend,
1719  	.do_action = snd_pcm_do_suspend,
1720  	.post_action = snd_pcm_post_suspend
1721  };
1722  
1723  /*
1724   * snd_pcm_suspend - trigger SUSPEND to all linked streams
1725   * @substream: the PCM substream
1726   *
1727   * After this call, all streams are changed to SUSPENDED state.
1728   *
1729   * Return: Zero if successful, or a negative error code.
1730   */
1731  static int snd_pcm_suspend(struct snd_pcm_substream *substream)
1732  {
1733  	guard(pcm_stream_lock_irqsave)(substream);
1734  	return snd_pcm_action(&snd_pcm_action_suspend, substream,
1735  			      ACTION_ARG_IGNORE);
1736  }
1737  
1738  /**
1739   * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1740   * @pcm: the PCM instance
1741   *
1742   * After this call, all streams are changed to SUSPENDED state.
1743   *
1744   * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1745   */
1746  int snd_pcm_suspend_all(struct snd_pcm *pcm)
1747  {
1748  	struct snd_pcm_substream *substream;
1749  	int stream, err = 0;
1750  
1751  	if (! pcm)
1752  		return 0;
1753  
1754  	for_each_pcm_substream(pcm, stream, substream) {
1755  		/* FIXME: the open/close code should lock this as well */
1756  		if (!substream->runtime)
1757  			continue;
1758  
1759  		/*
1760  		 * Skip BE dai link PCM's that are internal and may
1761  		 * not have their substream ops set.
1762  		 */
1763  		if (!substream->ops)
1764  			continue;
1765  
1766  		err = snd_pcm_suspend(substream);
1767  		if (err < 0 && err != -EBUSY)
1768  			return err;
1769  	}
1770  
1771  	for_each_pcm_substream(pcm, stream, substream)
1772  		snd_pcm_sync_stop(substream, false);
1773  
1774  	return 0;
1775  }
1776  EXPORT_SYMBOL(snd_pcm_suspend_all);
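
/*
 * Illustrative sketch (hypothetical "mychip" driver): snd_pcm_suspend_all()
 * is typically called from a driver's PM suspend callback before the
 * hardware state is saved:
 *
 *	static int mychip_suspend(struct device *dev)
 *	{
 *		struct mychip *chip = dev_get_drvdata(dev);
 *
 *		snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
 *		snd_pcm_suspend_all(chip->pcm);
 *		// save registers, disable clocks, etc.
 *		return 0;
 *	}
 */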
1777  
1778  /* resume callbacks: state argument ignored */
1779  
1780  static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
1781  			      snd_pcm_state_t state)
1782  {
1783  	struct snd_pcm_runtime *runtime = substream->runtime;
1784  	if (runtime->state != SNDRV_PCM_STATE_SUSPENDED)
1785  		return -EBADFD;
1786  	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1787  		return -ENOSYS;
1788  	runtime->trigger_master = substream;
1789  	return 0;
1790  }
1791  
1792  static int snd_pcm_do_resume(struct snd_pcm_substream *substream,
1793  			     snd_pcm_state_t state)
1794  {
1795  	struct snd_pcm_runtime *runtime = substream->runtime;
1796  	if (runtime->trigger_master != substream)
1797  		return 0;
1798  	/* DMA not running previously? */
1799  	if (runtime->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1800  	    (runtime->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1801  	     substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1802  		return 0;
1803  	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1804  }
1805  
1806  static void snd_pcm_undo_resume(struct snd_pcm_substream *substream,
1807  				snd_pcm_state_t state)
1808  {
1809  	if (substream->runtime->trigger_master == substream &&
1810  	    snd_pcm_running(substream))
1811  		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1812  }
1813  
1814  static void snd_pcm_post_resume(struct snd_pcm_substream *substream,
1815  				snd_pcm_state_t state)
1816  {
1817  	struct snd_pcm_runtime *runtime = substream->runtime;
1818  	snd_pcm_trigger_tstamp(substream);
1819  	__snd_pcm_set_state(runtime, runtime->suspended_state);
1820  	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1821  }
1822  
1823  static const struct action_ops snd_pcm_action_resume = {
1824  	.pre_action = snd_pcm_pre_resume,
1825  	.do_action = snd_pcm_do_resume,
1826  	.undo_action = snd_pcm_undo_resume,
1827  	.post_action = snd_pcm_post_resume
1828  };
1829  
1830  static int snd_pcm_resume(struct snd_pcm_substream *substream)
1831  {
1832  	return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream,
1833  				       ACTION_ARG_IGNORE);
1834  }
1835  
1836  #else
1837  
1838  static int snd_pcm_resume(struct snd_pcm_substream *substream)
1839  {
1840  	return -ENOSYS;
1841  }
1842  
1843  #endif /* CONFIG_PM */
1844  
1845  /*
1846   * xrun ioctl
1847   *
1848   * Change the RUNNING stream(s) to XRUN state.
1849   */
1850  static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1851  {
1852  	struct snd_pcm_runtime *runtime = substream->runtime;
1853  
1854  	guard(pcm_stream_lock_irq)(substream);
1855  	switch (runtime->state) {
1856  	case SNDRV_PCM_STATE_XRUN:
1857  		return 0;	/* already there */
1858  	case SNDRV_PCM_STATE_RUNNING:
1859  		__snd_pcm_xrun(substream);
1860  		return 0;
1861  	default:
1862  		return -EBADFD;
1863  	}
1864  }
1865  
1866  /*
1867   * reset ioctl
1868   */
1869  /* reset callbacks:  state argument ignored */
1870  static int snd_pcm_pre_reset(struct snd_pcm_substream *substream,
1871  			     snd_pcm_state_t state)
1872  {
1873  	struct snd_pcm_runtime *runtime = substream->runtime;
1874  	switch (runtime->state) {
1875  	case SNDRV_PCM_STATE_RUNNING:
1876  	case SNDRV_PCM_STATE_PREPARED:
1877  	case SNDRV_PCM_STATE_PAUSED:
1878  	case SNDRV_PCM_STATE_SUSPENDED:
1879  		return 0;
1880  	default:
1881  		return -EBADFD;
1882  	}
1883  }
1884  
1885  static int snd_pcm_do_reset(struct snd_pcm_substream *substream,
1886  			    snd_pcm_state_t state)
1887  {
1888  	struct snd_pcm_runtime *runtime = substream->runtime;
1889  	int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1890  	if (err < 0)
1891  		return err;
1892  	guard(pcm_stream_lock_irq)(substream);
1893  	runtime->hw_ptr_base = 0;
1894  	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1895  		runtime->status->hw_ptr % runtime->period_size;
1896  	runtime->silence_start = runtime->status->hw_ptr;
1897  	runtime->silence_filled = 0;
1898  	return 0;
1899  }
1900  
1901  static void snd_pcm_post_reset(struct snd_pcm_substream *substream,
1902  			       snd_pcm_state_t state)
1903  {
1904  	struct snd_pcm_runtime *runtime = substream->runtime;
1905  	guard(pcm_stream_lock_irq)(substream);
1906  	runtime->control->appl_ptr = runtime->status->hw_ptr;
1907  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1908  	    runtime->silence_size > 0)
1909  		snd_pcm_playback_silence(substream, ULONG_MAX);
1910  }
1911  
1912  static const struct action_ops snd_pcm_action_reset = {
1913  	.pre_action = snd_pcm_pre_reset,
1914  	.do_action = snd_pcm_do_reset,
1915  	.post_action = snd_pcm_post_reset
1916  };
1917  
1918  static int snd_pcm_reset(struct snd_pcm_substream *substream)
1919  {
1920  	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream,
1921  					ACTION_ARG_IGNORE);
1922  }
1923  
1924  /*
1925   * prepare ioctl
1926   */
1927  /* pass f_flags as state argument */
1928  static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1929  			       snd_pcm_state_t state)
1930  {
1931  	struct snd_pcm_runtime *runtime = substream->runtime;
1932  	int f_flags = (__force int)state;
1933  
1934  	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
1935  	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
1936  		return -EBADFD;
1937  	if (snd_pcm_running(substream))
1938  		return -EBUSY;
1939  	substream->f_flags = f_flags;
1940  	return 0;
1941  }
1942  
1943  static int snd_pcm_do_prepare(struct snd_pcm_substream *substream,
1944  			      snd_pcm_state_t state)
1945  {
1946  	int err;
1947  	snd_pcm_sync_stop(substream, true);
1948  	err = substream->ops->prepare(substream);
1949  	if (err < 0)
1950  		return err;
1951  	return snd_pcm_do_reset(substream, state);
1952  }
1953  
1954  static void snd_pcm_post_prepare(struct snd_pcm_substream *substream,
1955  				 snd_pcm_state_t state)
1956  {
1957  	struct snd_pcm_runtime *runtime = substream->runtime;
1958  	runtime->control->appl_ptr = runtime->status->hw_ptr;
1959  	snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1960  }
1961  
1962  static const struct action_ops snd_pcm_action_prepare = {
1963  	.pre_action = snd_pcm_pre_prepare,
1964  	.do_action = snd_pcm_do_prepare,
1965  	.post_action = snd_pcm_post_prepare
1966  };
1967  
1968  /**
1969   * snd_pcm_prepare - prepare the PCM substream to be triggerable
1970   * @substream: the PCM substream instance
1971   * @file: file to refer f_flags
1972   *
1973   * Return: Zero if successful, or a negative error code.
1974   */
1975  static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1976  			   struct file *file)
1977  {
1978  	int f_flags;
1979  
1980  	if (file)
1981  		f_flags = file->f_flags;
1982  	else
1983  		f_flags = substream->f_flags;
1984  
1985  	scoped_guard(pcm_stream_lock_irq, substream) {
1986  		switch (substream->runtime->state) {
1987  		case SNDRV_PCM_STATE_PAUSED:
1988  			snd_pcm_pause(substream, false);
1989  			fallthrough;
1990  		case SNDRV_PCM_STATE_SUSPENDED:
1991  			snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1992  			break;
1993  		}
1994  	}
1995  
1996  	return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1997  					substream,
1998  					(__force snd_pcm_state_t)f_flags);
1999  }
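
/*
 * Illustrative sketch: from user space, the PREPARE ioctl is the usual
 * recovery path after an underrun.  A write that fails with -EPIPE (XRUN)
 * is followed by SNDRV_PCM_IOCTL_PREPARE and a retry (error handling
 * abbreviated; "fd" is an already configured PCM file descriptor):
 *
 *	ret = write(fd, buf, bytes);
 *	if (ret < 0 && errno == EPIPE) {
 *		ioctl(fd, SNDRV_PCM_IOCTL_PREPARE);
 *		ret = write(fd, buf, bytes);
 *	}
 */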
2000  
2001  /*
2002   * drain ioctl
2003   */
2004  
2005  /* drain init callbacks: state argument ignored */
2006  static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream,
2007  				  snd_pcm_state_t state)
2008  {
2009  	struct snd_pcm_runtime *runtime = substream->runtime;
2010  	switch (runtime->state) {
2011  	case SNDRV_PCM_STATE_OPEN:
2012  	case SNDRV_PCM_STATE_DISCONNECTED:
2013  	case SNDRV_PCM_STATE_SUSPENDED:
2014  		return -EBADFD;
2015  	}
2016  	runtime->trigger_master = substream;
2017  	return 0;
2018  }
2019  
2020  static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream,
2021  				 snd_pcm_state_t state)
2022  {
2023  	struct snd_pcm_runtime *runtime = substream->runtime;
2024  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
2025  		switch (runtime->state) {
2026  		case SNDRV_PCM_STATE_PREPARED:
2027  			/* start playback stream if possible */
2028  			if (! snd_pcm_playback_empty(substream)) {
2029  				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
2030  				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
2031  			} else {
2032  				__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_SETUP);
2033  			}
2034  			break;
2035  		case SNDRV_PCM_STATE_RUNNING:
2036  			__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_DRAINING);
2037  			break;
2038  		case SNDRV_PCM_STATE_XRUN:
2039  			__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_SETUP);
2040  			break;
2041  		default:
2042  			break;
2043  		}
2044  	} else {
2045  		/* stop running stream */
2046  		if (runtime->state == SNDRV_PCM_STATE_RUNNING) {
2047  			snd_pcm_state_t new_state;
2048  
2049  			new_state = snd_pcm_capture_avail(runtime) > 0 ?
2050  				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
2051  			snd_pcm_do_stop(substream, new_state);
2052  			snd_pcm_post_stop(substream, new_state);
2053  		}
2054  	}
2055  
2056  	if (runtime->state == SNDRV_PCM_STATE_DRAINING &&
2057  	    runtime->trigger_master == substream &&
2058  	    (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
2059  		return substream->ops->trigger(substream,
2060  					       SNDRV_PCM_TRIGGER_DRAIN);
2061  
2062  	return 0;
2063  }
2064  
2065  static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream,
2066  				    snd_pcm_state_t state)
2067  {
2068  }
2069  
2070  static const struct action_ops snd_pcm_action_drain_init = {
2071  	.pre_action = snd_pcm_pre_drain_init,
2072  	.do_action = snd_pcm_do_drain_init,
2073  	.post_action = snd_pcm_post_drain_init
2074  };
2075  
2076  /*
2077   * Drain the stream(s).
2078   * When the substream is linked, sync until the draining of all playback streams
2079   * is finished.
2080   * After this call, all streams are supposed to be in either the SETUP or
2081   * DRAINING (capture only) state.
2082   */
2083  static int snd_pcm_drain(struct snd_pcm_substream *substream,
2084  			 struct file *file)
2085  {
2086  	struct snd_card *card;
2087  	struct snd_pcm_runtime *runtime;
2088  	struct snd_pcm_substream *s;
2089  	struct snd_pcm_group *group;
2090  	wait_queue_entry_t wait;
2091  	int result = 0;
2092  	int nonblock = 0;
2093  
2094  	card = substream->pcm->card;
2095  	runtime = substream->runtime;
2096  
2097  	if (runtime->state == SNDRV_PCM_STATE_OPEN)
2098  		return -EBADFD;
2099  
2100  	if (file) {
2101  		if (file->f_flags & O_NONBLOCK)
2102  			nonblock = 1;
2103  	} else if (substream->f_flags & O_NONBLOCK)
2104  		nonblock = 1;
2105  
2106  	snd_pcm_stream_lock_irq(substream);
2107  	/* resume pause */
2108  	if (runtime->state == SNDRV_PCM_STATE_PAUSED)
2109  		snd_pcm_pause(substream, false);
2110  
2111  	/* pre-start/stop - all running streams are changed to DRAINING state */
2112  	result = snd_pcm_action(&snd_pcm_action_drain_init, substream,
2113  				ACTION_ARG_IGNORE);
2114  	if (result < 0)
2115  		goto unlock;
2116  	/* in non-blocking, we don't wait in ioctl but let caller poll */
2117  	if (nonblock) {
2118  		result = -EAGAIN;
2119  		goto unlock;
2120  	}
2121  
2122  	for (;;) {
2123  		long tout;
2124  		struct snd_pcm_runtime *to_check;
2125  		if (signal_pending(current)) {
2126  			result = -ERESTARTSYS;
2127  			break;
2128  		}
2129  		/* find a substream to drain */
2130  		to_check = NULL;
2131  		group = snd_pcm_stream_group_ref(substream);
2132  		snd_pcm_group_for_each_entry(s, substream) {
2133  			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
2134  				continue;
2135  			runtime = s->runtime;
2136  			if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
2137  				to_check = runtime;
2138  				break;
2139  			}
2140  		}
2141  		snd_pcm_group_unref(group, substream);
2142  		if (!to_check)
2143  			break; /* all drained */
2144  		init_waitqueue_entry(&wait, current);
2145  		set_current_state(TASK_INTERRUPTIBLE);
2146  		add_wait_queue(&to_check->sleep, &wait);
2147  		snd_pcm_stream_unlock_irq(substream);
2148  		if (runtime->no_period_wakeup)
2149  			tout = MAX_SCHEDULE_TIMEOUT;
2150  		else {
2151  			tout = 100;
2152  			if (runtime->rate) {
2153  				long t = runtime->buffer_size * 1100 / runtime->rate;
2154  				tout = max(t, tout);
2155  			}
2156  			tout = msecs_to_jiffies(tout);
2157  		}
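		/*
		 * Worked example (illustrative): a 16384-frame buffer at
		 * 48000 Hz gives 16384 * 1100 / 48000 ~= 375 ms, i.e. the
		 * buffer length plus a 10% margin before the drain wait
		 * gives up and re-checks.
		 */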
2158  		tout = schedule_timeout(tout);
2159  
2160  		snd_pcm_stream_lock_irq(substream);
2161  		group = snd_pcm_stream_group_ref(substream);
2162  		snd_pcm_group_for_each_entry(s, substream) {
2163  			if (s->runtime == to_check) {
2164  				remove_wait_queue(&to_check->sleep, &wait);
2165  				break;
2166  			}
2167  		}
2168  		snd_pcm_group_unref(group, substream);
2169  
2170  		if (card->shutdown) {
2171  			result = -ENODEV;
2172  			break;
2173  		}
2174  		if (tout == 0) {
2175  			if (substream->runtime->state == SNDRV_PCM_STATE_SUSPENDED)
2176  				result = -ESTRPIPE;
2177  			else {
2178  				dev_dbg(substream->pcm->card->dev,
2179  					"playback drain timeout (DMA or IRQ trouble?)\n");
2180  				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2181  				result = -EIO;
2182  			}
2183  			break;
2184  		}
2185  	}
2186  
2187   unlock:
2188  	snd_pcm_stream_unlock_irq(substream);
2189  
2190  	return result;
2191  }
2192  
2193  /*
2194   * drop ioctl
2195   *
2196   * Immediately put all linked substreams into SETUP state.
2197   */
2198  static int snd_pcm_drop(struct snd_pcm_substream *substream)
2199  {
2200  	struct snd_pcm_runtime *runtime;
2201  	int result = 0;
2202  
2203  	if (PCM_RUNTIME_CHECK(substream))
2204  		return -ENXIO;
2205  	runtime = substream->runtime;
2206  
2207  	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
2208  	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
2209  		return -EBADFD;
2210  
2211  	guard(pcm_stream_lock_irq)(substream);
2212  	/* resume pause */
2213  	if (runtime->state == SNDRV_PCM_STATE_PAUSED)
2214  		snd_pcm_pause(substream, false);
2215  
2216  	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2217  	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
2218  
2219  	return result;
2220  }
2221  
2222  
2223  static bool is_pcm_file(struct file *file)
2224  {
2225  	struct inode *inode = file_inode(file);
2226  	struct snd_pcm *pcm;
2227  	unsigned int minor;
2228  
2229  	if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
2230  		return false;
2231  	minor = iminor(inode);
2232  	pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2233  	if (!pcm)
2234  		pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2235  	if (!pcm)
2236  		return false;
2237  	snd_card_unref(pcm->card);
2238  	return true;
2239  }
2240  
2241  /*
2242   * PCM link handling
2243   */
2244  static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
2245  {
2246  	struct snd_pcm_file *pcm_file;
2247  	struct snd_pcm_substream *substream1;
2248  	struct snd_pcm_group *group __free(kfree) = NULL;
2249  	struct snd_pcm_group *target_group;
2250  	bool nonatomic = substream->pcm->nonatomic;
2251  	CLASS(fd, f)(fd);
2252  
2253  	if (!fd_file(f))
2254  		return -EBADFD;
2255  	if (!is_pcm_file(fd_file(f)))
2256  		return -EBADFD;
2257  
2258  	pcm_file = fd_file(f)->private_data;
2259  	substream1 = pcm_file->substream;
2260  
2261  	if (substream == substream1)
2262  		return -EINVAL;
2263  
2264  	group = kzalloc(sizeof(*group), GFP_KERNEL);
2265  	if (!group)
2266  		return -ENOMEM;
2267  	snd_pcm_group_init(group);
2268  
2269  	guard(rwsem_write)(&snd_pcm_link_rwsem);
2270  	if (substream->runtime->state == SNDRV_PCM_STATE_OPEN ||
2271  	    substream->runtime->state != substream1->runtime->state ||
2272  	    substream->pcm->nonatomic != substream1->pcm->nonatomic)
2273  		return -EBADFD;
2274  	if (snd_pcm_stream_linked(substream1))
2275  		return -EALREADY;
2276  
2277  	scoped_guard(pcm_stream_lock_irq, substream) {
2278  		if (!snd_pcm_stream_linked(substream)) {
2279  			snd_pcm_group_assign(substream, group);
2280  			group = NULL; /* assigned, don't free this one below */
2281  		}
2282  		target_group = substream->group;
2283  	}
2284  
2285  	snd_pcm_group_lock_irq(target_group, nonatomic);
2286  	snd_pcm_stream_lock_nested(substream1);
2287  	snd_pcm_group_assign(substream1, target_group);
2288  	refcount_inc(&target_group->refs);
2289  	snd_pcm_stream_unlock(substream1);
2290  	snd_pcm_group_unlock_irq(target_group, nonatomic);
2291  	return 0;
2292  }
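
/*
 * Illustrative sketch: from user space the link above is reached through the
 * SNDRV_PCM_IOCTL_LINK ioctl, passing the file descriptor of the second PCM
 * device (example device paths, error handling omitted):
 *
 *	int fd1 = open("/dev/snd/pcmC0D0p", O_RDWR);
 *	int fd2 = open("/dev/snd/pcmC0D1p", O_RDWR);
 *
 *	ioctl(fd1, SNDRV_PCM_IOCTL_LINK, fd2);	// start/stop both together
 *	...
 *	ioctl(fd1, SNDRV_PCM_IOCTL_UNLINK);
 */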
2293  
2294  static void relink_to_local(struct snd_pcm_substream *substream)
2295  {
2296  	snd_pcm_stream_lock_nested(substream);
2297  	snd_pcm_group_assign(substream, &substream->self_group);
2298  	snd_pcm_stream_unlock(substream);
2299  }
2300  
2301  static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2302  {
2303  	struct snd_pcm_group *group;
2304  	bool nonatomic = substream->pcm->nonatomic;
2305  	bool do_free = false;
2306  
2307  	guard(rwsem_write)(&snd_pcm_link_rwsem);
2308  
2309  	if (!snd_pcm_stream_linked(substream))
2310  		return -EALREADY;
2311  
2312  	group = substream->group;
2313  	snd_pcm_group_lock_irq(group, nonatomic);
2314  
2315  	relink_to_local(substream);
2316  	refcount_dec(&group->refs);
2317  
2318  	/* detach the last stream, too */
2319  	if (list_is_singular(&group->substreams)) {
2320  		relink_to_local(list_first_entry(&group->substreams,
2321  						 struct snd_pcm_substream,
2322  						 link_list));
2323  		do_free = refcount_dec_and_test(&group->refs);
2324  	}
2325  
2326  	snd_pcm_group_unlock_irq(group, nonatomic);
2327  	if (do_free)
2328  		kfree(group);
2329  	return 0;
2330  }
2331  
2332  /*
2333   * hw configurator
2334   */
2335  static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2336  			       struct snd_pcm_hw_rule *rule)
2337  {
2338  	struct snd_interval t;
2339  	snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2340  		     hw_param_interval_c(params, rule->deps[1]), &t);
2341  	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2342  }
2343  
2344  static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2345  			       struct snd_pcm_hw_rule *rule)
2346  {
2347  	struct snd_interval t;
2348  	snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2349  		     hw_param_interval_c(params, rule->deps[1]), &t);
2350  	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2351  }
2352  
2353  static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2354  				   struct snd_pcm_hw_rule *rule)
2355  {
2356  	struct snd_interval t;
2357  	snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2358  			 hw_param_interval_c(params, rule->deps[1]),
2359  			 (unsigned long) rule->private, &t);
2360  	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2361  }
2362  
2363  static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2364  				   struct snd_pcm_hw_rule *rule)
2365  {
2366  	struct snd_interval t;
2367  	snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2368  			 (unsigned long) rule->private,
2369  			 hw_param_interval_c(params, rule->deps[1]), &t);
2370  	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2371  }
2372  
2373  static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2374  				  struct snd_pcm_hw_rule *rule)
2375  {
2376  	snd_pcm_format_t k;
2377  	const struct snd_interval *i =
2378  				hw_param_interval_c(params, rule->deps[0]);
2379  	struct snd_mask m;
2380  	struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2381  	snd_mask_any(&m);
2382  	pcm_for_each_format(k) {
2383  		int bits;
2384  		if (!snd_mask_test_format(mask, k))
2385  			continue;
2386  		bits = snd_pcm_format_physical_width(k);
2387  		if (bits <= 0)
2388  			continue; /* ignore invalid formats */
2389  		if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2390  			snd_mask_reset(&m, (__force unsigned)k);
2391  	}
2392  	return snd_mask_refine(mask, &m);
2393  }
2394  
2395  static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2396  				       struct snd_pcm_hw_rule *rule)
2397  {
2398  	struct snd_interval t;
2399  	snd_pcm_format_t k;
2400  
2401  	t.min = UINT_MAX;
2402  	t.max = 0;
2403  	t.openmin = 0;
2404  	t.openmax = 0;
2405  	pcm_for_each_format(k) {
2406  		int bits;
2407  		if (!snd_mask_test_format(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2408  			continue;
2409  		bits = snd_pcm_format_physical_width(k);
2410  		if (bits <= 0)
2411  			continue; /* ignore invalid formats */
2412  		if (t.min > (unsigned)bits)
2413  			t.min = bits;
2414  		if (t.max < (unsigned)bits)
2415  			t.max = bits;
2416  	}
2417  	t.integer = 1;
2418  	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2419  }
2420  
2421  #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12 ||\
2422  	SNDRV_PCM_RATE_128000 != 1 << 19
2423  #error "Change this table"
2424  #endif
2425  
2426  /* NOTE: the list is unsorted! */
2427  static const unsigned int rates[] = {
2428  	5512, 8000, 11025, 16000, 22050, 32000, 44100,
2429  	48000, 64000, 88200, 96000, 176400, 192000, 352800, 384000, 705600, 768000,
2430  	/* extended */
2431  	12000, 24000, 128000
2432  };
2433  
2434  const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2435  	.count = ARRAY_SIZE(rates),
2436  	.list = rates,
2437  };
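
/*
 * Illustrative sketch: a driver that only supports a few discrete rates
 * typically declares its own list and applies it with
 * snd_pcm_hw_constraint_list() from its open callback (the "mychip_*"
 * names are placeholders):
 *
 *	static const unsigned int mychip_rates[] = { 44100, 48000, 96000 };
 *	static const struct snd_pcm_hw_constraint_list mychip_rate_list = {
 *		.count = ARRAY_SIZE(mychip_rates),
 *		.list = mychip_rates,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &mychip_rate_list);
 */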
2438  
2439  static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2440  				struct snd_pcm_hw_rule *rule)
2441  {
2442  	struct snd_pcm_hardware *hw = rule->private;
2443  	return snd_interval_list(hw_param_interval(params, rule->var),
2444  				 snd_pcm_known_rates.count,
2445  				 snd_pcm_known_rates.list, hw->rates);
2446  }
2447  
2448  static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2449  					    struct snd_pcm_hw_rule *rule)
2450  {
2451  	struct snd_interval t;
2452  	struct snd_pcm_substream *substream = rule->private;
2453  	t.min = 0;
2454  	t.max = substream->buffer_bytes_max;
2455  	t.openmin = 0;
2456  	t.openmax = 0;
2457  	t.integer = 1;
2458  	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2459  }
2460  
2461  static int snd_pcm_hw_rule_subformats(struct snd_pcm_hw_params *params,
2462  				      struct snd_pcm_hw_rule *rule)
2463  {
2464  	struct snd_mask *sfmask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_SUBFORMAT);
2465  	struct snd_mask *fmask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2466  	u32 *subformats = rule->private;
2467  	snd_pcm_format_t f;
2468  	struct snd_mask m;
2469  
2470  	snd_mask_none(&m);
2471  	/* All PCMs support at least the default STD subformat. */
2472  	snd_mask_set(&m, (__force unsigned)SNDRV_PCM_SUBFORMAT_STD);
2473  
2474  	pcm_for_each_format(f) {
2475  		if (!snd_mask_test(fmask, (__force unsigned)f))
2476  			continue;
2477  
2478  		if (f == SNDRV_PCM_FORMAT_S32_LE && *subformats)
2479  			m.bits[0] |= *subformats;
2480  		else if (snd_pcm_format_linear(f))
2481  			snd_mask_set(&m, (__force unsigned)SNDRV_PCM_SUBFORMAT_MSBITS_MAX);
2482  	}
2483  
2484  	return snd_mask_refine(sfmask, &m);
2485  }
2486  
2487  static int snd_pcm_hw_constraint_subformats(struct snd_pcm_runtime *runtime,
2488  					   unsigned int cond, u32 *subformats)
2489  {
2490  	return snd_pcm_hw_rule_add(runtime, cond, -1,
2491  				   snd_pcm_hw_rule_subformats, (void *)subformats,
2492  				   SNDRV_PCM_HW_PARAM_SUBFORMAT,
2493  				   SNDRV_PCM_HW_PARAM_FORMAT, -1);
2494  }
2495  
2496  static int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2497  {
2498  	struct snd_pcm_runtime *runtime = substream->runtime;
2499  	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2500  	int k, err;
2501  
2502  	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2503  		snd_mask_any(constrs_mask(constrs, k));
2504  	}
2505  
2506  	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2507  		snd_interval_any(constrs_interval(constrs, k));
2508  	}
2509  
2510  	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2511  	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2512  	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2513  	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2514  	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2515  
2516  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2517  				   snd_pcm_hw_rule_format, NULL,
2518  				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2519  	if (err < 0)
2520  		return err;
2521  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2522  				  snd_pcm_hw_rule_sample_bits, NULL,
2523  				  SNDRV_PCM_HW_PARAM_FORMAT,
2524  				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2525  	if (err < 0)
2526  		return err;
2527  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2528  				  snd_pcm_hw_rule_div, NULL,
2529  				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2530  	if (err < 0)
2531  		return err;
2532  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2533  				  snd_pcm_hw_rule_mul, NULL,
2534  				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2535  	if (err < 0)
2536  		return err;
2537  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2538  				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2539  				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2540  	if (err < 0)
2541  		return err;
2542  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2543  				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2544  				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2545  	if (err < 0)
2546  		return err;
2547  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2548  				  snd_pcm_hw_rule_div, NULL,
2549  				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2550  	if (err < 0)
2551  		return err;
2552  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2553  				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2554  				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2555  	if (err < 0)
2556  		return err;
2557  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2558  				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2559  				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2560  	if (err < 0)
2561  		return err;
2562  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2563  				  snd_pcm_hw_rule_div, NULL,
2564  				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2565  	if (err < 0)
2566  		return err;
2567  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2568  				  snd_pcm_hw_rule_div, NULL,
2569  				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2570  	if (err < 0)
2571  		return err;
2572  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2573  				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2574  				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2575  	if (err < 0)
2576  		return err;
2577  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2578  				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
2579  				  SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2580  	if (err < 0)
2581  		return err;
2582  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2583  				  snd_pcm_hw_rule_mul, NULL,
2584  				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2585  	if (err < 0)
2586  		return err;
2587  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2588  				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2589  				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2590  	if (err < 0)
2591  		return err;
2592  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2593  				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
2594  				  SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2595  	if (err < 0)
2596  		return err;
2597  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2598  				  snd_pcm_hw_rule_muldivk, (void*) 8,
2599  				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2600  	if (err < 0)
2601  		return err;
2602  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2603  				  snd_pcm_hw_rule_muldivk, (void*) 8,
2604  				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2605  	if (err < 0)
2606  		return err;
2607  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2608  				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2609  				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2610  	if (err < 0)
2611  		return err;
2612  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2613  				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2614  				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2615  	if (err < 0)
2616  		return err;
2617  	return 0;
2618  }
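
/*
 * Worked example of the rule chain above (illustrative numbers): with
 * RATE = 48000 and PERIOD_SIZE = 1024 frames, the mulkdiv rule computes
 * PERIOD_TIME = 1024 * 1000000 / 48000 ~= 21333 us, and conversely the
 * muldivk rule recovers PERIOD_SIZE = 21333 * 48000 / 1000000 ~= 1024
 * frames, so refining either parameter narrows the other.
 */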
2619  
2620  static int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2621  {
2622  	struct snd_pcm_runtime *runtime = substream->runtime;
2623  	struct snd_pcm_hardware *hw = &runtime->hw;
2624  	int err;
2625  	unsigned int mask = 0;
2626  
2627  	if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2628  		mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_INTERLEAVED);
2629  	if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2630  		mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
2631  	if (hw_support_mmap(substream)) {
2632  		if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2633  			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
2634  		if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2635  			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED);
2636  		if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2637  			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_COMPLEX);
2638  	}
2639  	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2640  	if (err < 0)
2641  		return err;
2642  
2643  	err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2644  	if (err < 0)
2645  		return err;
2646  
2647  	err = snd_pcm_hw_constraint_subformats(runtime, 0, &hw->subformats);
2648  	if (err < 0)
2649  		return err;
2650  
2651  	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2652  					   hw->channels_min, hw->channels_max);
2653  	if (err < 0)
2654  		return err;
2655  
2656  	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2657  					   hw->rate_min, hw->rate_max);
2658  	if (err < 0)
2659  		return err;
2660  
2661  	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2662  					   hw->period_bytes_min, hw->period_bytes_max);
2663  	if (err < 0)
2664  		return err;
2665  
2666  	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2667  					   hw->periods_min, hw->periods_max);
2668  	if (err < 0)
2669  		return err;
2670  
2671  	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2672  					   hw->period_bytes_min, hw->buffer_bytes_max);
2673  	if (err < 0)
2674  		return err;
2675  
2676  	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2677  				  snd_pcm_hw_rule_buffer_bytes_max, substream,
2678  				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2679  	if (err < 0)
2680  		return err;
2681  
2682  	/* FIXME: remove */
2683  	if (runtime->dma_bytes) {
2684  		err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2685  		if (err < 0)
2686  			return err;
2687  	}
2688  
2689  	if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2690  		err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2691  					  snd_pcm_hw_rule_rate, hw,
2692  					  SNDRV_PCM_HW_PARAM_RATE, -1);
2693  		if (err < 0)
2694  			return err;
2695  	}
2696  
2697  	/* FIXME: this belongs to the lowlevel driver */
2698  	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
2699  
2700  	return 0;
2701  }
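
/*
 * Illustrative sketch: the hw fields consumed above come from the driver's
 * snd_pcm_hardware description, e.g. (hypothetical values):
 *
 *	static const struct snd_pcm_hardware mychip_pcm_hw = {
 *		.info			= SNDRV_PCM_INFO_MMAP |
 *					  SNDRV_PCM_INFO_INTERLEAVED |
 *					  SNDRV_PCM_INFO_BLOCK_TRANSFER,
 *		.formats		= SNDRV_PCM_FMTBIT_S16_LE,
 *		.rates			= SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
 *		.rate_min		= 44100,
 *		.rate_max		= 48000,
 *		.channels_min		= 2,
 *		.channels_max		= 2,
 *		.buffer_bytes_max	= 128 * 1024,
 *		.period_bytes_min	= 4096,
 *		.period_bytes_max	= 32 * 1024,
 *		.periods_min		= 2,
 *		.periods_max		= 32,
 *	};
 */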
2702  
2703  static void pcm_release_private(struct snd_pcm_substream *substream)
2704  {
2705  	if (snd_pcm_stream_linked(substream))
2706  		snd_pcm_unlink(substream);
2707  }
2708  
2709  void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2710  {
2711  	substream->ref_count--;
2712  	if (substream->ref_count > 0)
2713  		return;
2714  
2715  	snd_pcm_drop(substream);
2716  	if (substream->hw_opened) {
2717  		if (substream->runtime->state != SNDRV_PCM_STATE_OPEN)
2718  			do_hw_free(substream);
2719  		substream->ops->close(substream);
2720  		substream->hw_opened = 0;
2721  	}
2722  	if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
2723  		cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
2724  	if (substream->pcm_release) {
2725  		substream->pcm_release(substream);
2726  		substream->pcm_release = NULL;
2727  	}
2728  	snd_pcm_detach_substream(substream);
2729  }
2730  EXPORT_SYMBOL(snd_pcm_release_substream);
2731  
2732  int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2733  			   struct file *file,
2734  			   struct snd_pcm_substream **rsubstream)
2735  {
2736  	struct snd_pcm_substream *substream;
2737  	int err;
2738  
2739  	err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2740  	if (err < 0)
2741  		return err;
2742  	if (substream->ref_count > 1) {
2743  		*rsubstream = substream;
2744  		return 0;
2745  	}
2746  
2747  	err = snd_pcm_hw_constraints_init(substream);
2748  	if (err < 0) {
2749  		pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2750  		goto error;
2751  	}
2752  
2753  	err = substream->ops->open(substream);
2754  	if (err < 0)
2755  		goto error;
2756  
2757  	substream->hw_opened = 1;
2758  
2759  	err = snd_pcm_hw_constraints_complete(substream);
2760  	if (err < 0) {
2761  		pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2762  		goto error;
2763  	}
2764  
2765  	/* automatically set EXPLICIT_SYNC flag in the managed mode whenever
2766  	 * the DMA buffer requires it
2767  	 */
2768  	if (substream->managed_buffer_alloc &&
2769  	    substream->dma_buffer.dev.need_sync)
2770  		substream->runtime->hw.info |= SNDRV_PCM_INFO_EXPLICIT_SYNC;
2771  
2772  	*rsubstream = substream;
2773  	return 0;
2774  
2775   error:
2776  	snd_pcm_release_substream(substream);
2777  	return err;
2778  }
2779  EXPORT_SYMBOL(snd_pcm_open_substream);
2780  
2781  static int snd_pcm_open_file(struct file *file,
2782  			     struct snd_pcm *pcm,
2783  			     int stream)
2784  {
2785  	struct snd_pcm_file *pcm_file;
2786  	struct snd_pcm_substream *substream;
2787  	int err;
2788  
2789  	err = snd_pcm_open_substream(pcm, stream, file, &substream);
2790  	if (err < 0)
2791  		return err;
2792  
2793  	pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2794  	if (pcm_file == NULL) {
2795  		snd_pcm_release_substream(substream);
2796  		return -ENOMEM;
2797  	}
2798  	pcm_file->substream = substream;
2799  	if (substream->ref_count == 1)
2800  		substream->pcm_release = pcm_release_private;
2801  	file->private_data = pcm_file;
2802  
2803  	return 0;
2804  }
2805  
2806  static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2807  {
2808  	struct snd_pcm *pcm;
2809  	int err = nonseekable_open(inode, file);
2810  	if (err < 0)
2811  		return err;
2812  	pcm = snd_lookup_minor_data(iminor(inode),
2813  				    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2814  	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2815  	if (pcm)
2816  		snd_card_unref(pcm->card);
2817  	return err;
2818  }
2819  
2820  static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2821  {
2822  	struct snd_pcm *pcm;
2823  	int err = nonseekable_open(inode, file);
2824  	if (err < 0)
2825  		return err;
2826  	pcm = snd_lookup_minor_data(iminor(inode),
2827  				    SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2828  	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2829  	if (pcm)
2830  		snd_card_unref(pcm->card);
2831  	return err;
2832  }
2833  
2834  static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2835  {
2836  	int err;
2837  	wait_queue_entry_t wait;
2838  
2839  	if (pcm == NULL) {
2840  		err = -ENODEV;
2841  		goto __error1;
2842  	}
2843  	err = snd_card_file_add(pcm->card, file);
2844  	if (err < 0)
2845  		goto __error1;
2846  	if (!try_module_get(pcm->card->module)) {
2847  		err = -EFAULT;
2848  		goto __error2;
2849  	}
2850  	init_waitqueue_entry(&wait, current);
2851  	add_wait_queue(&pcm->open_wait, &wait);
2852  	mutex_lock(&pcm->open_mutex);
2853  	while (1) {
2854  		err = snd_pcm_open_file(file, pcm, stream);
2855  		if (err >= 0)
2856  			break;
2857  		if (err == -EAGAIN) {
2858  			if (file->f_flags & O_NONBLOCK) {
2859  				err = -EBUSY;
2860  				break;
2861  			}
2862  		} else
2863  			break;
2864  		set_current_state(TASK_INTERRUPTIBLE);
2865  		mutex_unlock(&pcm->open_mutex);
2866  		schedule();
2867  		mutex_lock(&pcm->open_mutex);
2868  		if (pcm->card->shutdown) {
2869  			err = -ENODEV;
2870  			break;
2871  		}
2872  		if (signal_pending(current)) {
2873  			err = -ERESTARTSYS;
2874  			break;
2875  		}
2876  	}
2877  	remove_wait_queue(&pcm->open_wait, &wait);
2878  	mutex_unlock(&pcm->open_mutex);
2879  	if (err < 0)
2880  		goto __error;
2881  	return err;
2882  
2883        __error:
2884  	module_put(pcm->card->module);
2885        __error2:
2886        	snd_card_file_remove(pcm->card, file);
2887        __error1:
2888        	return err;
2889  }
2890  
2891  static int snd_pcm_release(struct inode *inode, struct file *file)
2892  {
2893  	struct snd_pcm *pcm;
2894  	struct snd_pcm_substream *substream;
2895  	struct snd_pcm_file *pcm_file;
2896  
2897  	pcm_file = file->private_data;
2898  	substream = pcm_file->substream;
2899  	if (snd_BUG_ON(!substream))
2900  		return -ENXIO;
2901  	pcm = substream->pcm;
2902  
2903  	/* block until the device gets woken up as it may touch the hardware */
2904  	snd_power_wait(pcm->card);
2905  
2906  	scoped_guard(mutex, &pcm->open_mutex) {
2907  		snd_pcm_release_substream(substream);
2908  		kfree(pcm_file);
2909  	}
2910  	wake_up(&pcm->open_wait);
2911  	module_put(pcm->card->module);
2912  	snd_card_file_remove(pcm->card, file);
2913  	return 0;
2914  }
2915  
2916  /* check and update PCM state; return 0 or a negative error
2917   * call this inside PCM lock
2918   */
2919  static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2920  {
2921  	switch (substream->runtime->state) {
2922  	case SNDRV_PCM_STATE_DRAINING:
2923  		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2924  			return -EBADFD;
2925  		fallthrough;
2926  	case SNDRV_PCM_STATE_RUNNING:
2927  		return snd_pcm_update_hw_ptr(substream);
2928  	case SNDRV_PCM_STATE_PREPARED:
2929  	case SNDRV_PCM_STATE_PAUSED:
2930  		return 0;
2931  	case SNDRV_PCM_STATE_SUSPENDED:
2932  		return -ESTRPIPE;
2933  	case SNDRV_PCM_STATE_XRUN:
2934  		return -EPIPE;
2935  	default:
2936  		return -EBADFD;
2937  	}
2938  }
2939  
2940  /* increase the appl_ptr; returns the processed frames or a negative error */
2941  static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2942  					  snd_pcm_uframes_t frames,
2943  					   snd_pcm_sframes_t avail)
2944  {
2945  	struct snd_pcm_runtime *runtime = substream->runtime;
2946  	snd_pcm_sframes_t appl_ptr;
2947  	int ret;
2948  
2949  	if (avail <= 0)
2950  		return 0;
2951  	if (frames > (snd_pcm_uframes_t)avail)
2952  		frames = avail;
2953  	appl_ptr = runtime->control->appl_ptr + frames;
2954  	if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2955  		appl_ptr -= runtime->boundary;
2956  	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2957  	return ret < 0 ? ret : frames;
2958  }
2959  
2960  /* decrease the appl_ptr; returns the processed frames or zero for error */
2961  static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2962  					 snd_pcm_uframes_t frames,
2963  					 snd_pcm_sframes_t avail)
2964  {
2965  	struct snd_pcm_runtime *runtime = substream->runtime;
2966  	snd_pcm_sframes_t appl_ptr;
2967  	int ret;
2968  
2969  	if (avail <= 0)
2970  		return 0;
2971  	if (frames > (snd_pcm_uframes_t)avail)
2972  		frames = avail;
2973  	appl_ptr = runtime->control->appl_ptr - frames;
2974  	if (appl_ptr < 0)
2975  		appl_ptr += runtime->boundary;
2976  	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2977  	/* NOTE: we return zero for errors because PulseAudio gets depressed
2978  	 * upon receiving an error from rewind ioctl and stops processing
2979  	 * any longer.  Returning zero means that no rewind is done, so
2980  	 * it's not absolutely wrong to answer like that.
2981  	 */
2982  	return ret < 0 ? 0 : frames;
2983  }
2984  
2985  static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
2986  					snd_pcm_uframes_t frames)
2987  {
2988  	snd_pcm_sframes_t ret;
2989  
2990  	if (frames == 0)
2991  		return 0;
2992  
2993  	scoped_guard(pcm_stream_lock_irq, substream) {
2994  		ret = do_pcm_hwsync(substream);
2995  		if (!ret)
2996  			ret = rewind_appl_ptr(substream, frames,
2997  					      snd_pcm_hw_avail(substream));
2998  	}
2999  	if (ret >= 0)
3000  		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
3001  	return ret;
3002  }
3003  
3004  static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
3005  					 snd_pcm_uframes_t frames)
3006  {
3007  	snd_pcm_sframes_t ret;
3008  
3009  	if (frames == 0)
3010  		return 0;
3011  
3012  	scoped_guard(pcm_stream_lock_irq, substream) {
3013  		ret = do_pcm_hwsync(substream);
3014  		if (!ret)
3015  			ret = forward_appl_ptr(substream, frames,
3016  					       snd_pcm_avail(substream));
3017  	}
3018  	if (ret >= 0)
3019  		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
3020  	return ret;
3021  }
3022  
3023  static int snd_pcm_delay(struct snd_pcm_substream *substream,
3024  			 snd_pcm_sframes_t *delay)
3025  {
3026  	int err;
3027  
3028  	scoped_guard(pcm_stream_lock_irq, substream) {
3029  		err = do_pcm_hwsync(substream);
3030  		if (delay && !err)
3031  			*delay = snd_pcm_calc_delay(substream);
3032  	}
3033  	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
3034  
3035  	return err;
3036  }
3037  
3038  static inline int snd_pcm_hwsync(struct snd_pcm_substream *substream)
3039  {
3040  	return snd_pcm_delay(substream, NULL);
3041  }
3042  
3043  static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
3044  			    struct snd_pcm_sync_ptr __user *_sync_ptr)
3045  {
3046  	struct snd_pcm_runtime *runtime = substream->runtime;
3047  	struct snd_pcm_sync_ptr sync_ptr;
3048  	volatile struct snd_pcm_mmap_status *status;
3049  	volatile struct snd_pcm_mmap_control *control;
3050  	int err;
3051  
3052  	memset(&sync_ptr, 0, sizeof(sync_ptr));
3053  	if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
3054  		return -EFAULT;
3055  	if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
3056  		return -EFAULT;
3057  	status = runtime->status;
3058  	control = runtime->control;
3059  	if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
3060  		err = snd_pcm_hwsync(substream);
3061  		if (err < 0)
3062  			return err;
3063  	}
3064  	scoped_guard(pcm_stream_lock_irq, substream) {
3065  		if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
3066  			err = pcm_lib_apply_appl_ptr(substream,
3067  						     sync_ptr.c.control.appl_ptr);
3068  			if (err < 0)
3069  				return err;
3070  		} else {
3071  			sync_ptr.c.control.appl_ptr = control->appl_ptr;
3072  		}
3073  		if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3074  			control->avail_min = sync_ptr.c.control.avail_min;
3075  		else
3076  			sync_ptr.c.control.avail_min = control->avail_min;
3077  		sync_ptr.s.status.state = status->state;
3078  		sync_ptr.s.status.hw_ptr = status->hw_ptr;
3079  		sync_ptr.s.status.tstamp = status->tstamp;
3080  		sync_ptr.s.status.suspended_state = status->suspended_state;
3081  		sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
3082  	}
3083  	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL))
3084  		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
3085  	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
3086  		return -EFAULT;
3087  	return 0;
3088  }
3089  
3090  struct snd_pcm_mmap_status32 {
3091  	snd_pcm_state_t state;
3092  	s32 pad1;
3093  	u32 hw_ptr;
3094  	s32 tstamp_sec;
3095  	s32 tstamp_nsec;
3096  	snd_pcm_state_t suspended_state;
3097  	s32 audio_tstamp_sec;
3098  	s32 audio_tstamp_nsec;
3099  } __packed;
3100  
3101  struct snd_pcm_mmap_control32 {
3102  	u32 appl_ptr;
3103  	u32 avail_min;
3104  };
3105  
3106  struct snd_pcm_sync_ptr32 {
3107  	u32 flags;
3108  	union {
3109  		struct snd_pcm_mmap_status32 status;
3110  		unsigned char reserved[64];
3111  	} s;
3112  	union {
3113  		struct snd_pcm_mmap_control32 control;
3114  		unsigned char reserved[64];
3115  	} c;
3116  } __packed;
3117  
3118  /* recalculate the boundary so that it fits within 32 bits */
3119  static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
3120  {
3121  	snd_pcm_uframes_t boundary;
3122  
3123  	if (! runtime->buffer_size)
3124  		return 0;
3125  	boundary = runtime->buffer_size;
3126  	while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
3127  		boundary *= 2;
3128  	return boundary;
3129  }
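
/*
 * Worked example (illustration only): with buffer_size = 1024 frames the
 * loop above keeps doubling while 2 * boundary still fits below
 * 0x7fffffff - buffer_size, ending at 1024 * 2^20 = 0x40000000 frames,
 * i.e. the largest power-of-two multiple of the buffer size that a 32-bit
 * position counter can wrap at without overflow.
 */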
3130  
3131  static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
3132  					 struct snd_pcm_sync_ptr32 __user *src)
3133  {
3134  	struct snd_pcm_runtime *runtime = substream->runtime;
3135  	volatile struct snd_pcm_mmap_status *status;
3136  	volatile struct snd_pcm_mmap_control *control;
3137  	u32 sflags;
3138  	struct snd_pcm_mmap_control scontrol;
3139  	struct snd_pcm_mmap_status sstatus;
3140  	snd_pcm_uframes_t boundary;
3141  	int err;
3142  
3143  	if (snd_BUG_ON(!runtime))
3144  		return -EINVAL;
3145  
3146  	if (get_user(sflags, &src->flags) ||
3147  	    get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3148  	    get_user(scontrol.avail_min, &src->c.control.avail_min))
3149  		return -EFAULT;
3150  	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
3151  		err = snd_pcm_hwsync(substream);
3152  		if (err < 0)
3153  			return err;
3154  	}
3155  	status = runtime->status;
3156  	control = runtime->control;
3157  	boundary = recalculate_boundary(runtime);
3158  	if (! boundary)
3159  		boundary = 0x7fffffff;
3160  	scoped_guard(pcm_stream_lock_irq, substream) {
3161  		/* FIXME: we should consider the boundary for the sync from app */
3162  		if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
3163  			err = pcm_lib_apply_appl_ptr(substream,
3164  						     scontrol.appl_ptr);
3165  			if (err < 0)
3166  				return err;
3167  		} else
3168  			scontrol.appl_ptr = control->appl_ptr % boundary;
3169  		if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3170  			control->avail_min = scontrol.avail_min;
3171  		else
3172  			scontrol.avail_min = control->avail_min;
3173  		sstatus.state = status->state;
3174  		sstatus.hw_ptr = status->hw_ptr % boundary;
3175  		sstatus.tstamp = status->tstamp;
3176  		sstatus.suspended_state = status->suspended_state;
3177  		sstatus.audio_tstamp = status->audio_tstamp;
3178  	}
3179  	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
3180  		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
3181  	if (put_user(sstatus.state, &src->s.status.state) ||
3182  	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
3183  	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp_sec) ||
3184  	    put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp_nsec) ||
3185  	    put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
3186  	    put_user(sstatus.audio_tstamp.tv_sec, &src->s.status.audio_tstamp_sec) ||
3187  	    put_user(sstatus.audio_tstamp.tv_nsec, &src->s.status.audio_tstamp_nsec) ||
3188  	    put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3189  	    put_user(scontrol.avail_min, &src->c.control.avail_min))
3190  		return -EFAULT;
3191  
3192  	return 0;
3193  }
3194  #define __SNDRV_PCM_IOCTL_SYNC_PTR32 _IOWR('A', 0x23, struct snd_pcm_sync_ptr32)
3195  
3196  static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
3197  {
3198  	struct snd_pcm_runtime *runtime = substream->runtime;
3199  	int arg;
3200  
3201  	if (get_user(arg, _arg))
3202  		return -EFAULT;
3203  	if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
3204  		return -EINVAL;
3205  	runtime->tstamp_type = arg;
3206  	return 0;
3207  }
3208  
3209  static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
3210  				      struct snd_xferi __user *_xferi)
3211  {
3212  	struct snd_xferi xferi;
3213  	struct snd_pcm_runtime *runtime = substream->runtime;
3214  	snd_pcm_sframes_t result;
3215  
3216  	if (runtime->state == SNDRV_PCM_STATE_OPEN)
3217  		return -EBADFD;
3218  	if (put_user(0, &_xferi->result))
3219  		return -EFAULT;
3220  	if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
3221  		return -EFAULT;
3222  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3223  		result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
3224  	else
3225  		result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
3226  	if (put_user(result, &_xferi->result))
3227  		return -EFAULT;
3228  	return result < 0 ? result : 0;
3229  }
3230  
3231  static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
3232  				      struct snd_xfern __user *_xfern)
3233  {
3234  	struct snd_xfern xfern;
3235  	struct snd_pcm_runtime *runtime = substream->runtime;
3236  	void *bufs __free(kfree) = NULL;
3237  	snd_pcm_sframes_t result;
3238  
3239  	if (runtime->state == SNDRV_PCM_STATE_OPEN)
3240  		return -EBADFD;
3241  	if (runtime->channels > 128)
3242  		return -EINVAL;
3243  	if (put_user(0, &_xfern->result))
3244  		return -EFAULT;
3245  	if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
3246  		return -EFAULT;
3247  
3248  	bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
3249  	if (IS_ERR(bufs))
3250  		return PTR_ERR(bufs);
3251  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3252  		result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
3253  	else
3254  		result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
3255  	if (put_user(result, &_xfern->result))
3256  		return -EFAULT;
3257  	return result < 0 ? result : 0;
3258  }
3259  
3260  static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
3261  				snd_pcm_uframes_t __user *_frames)
3262  {
3263  	snd_pcm_uframes_t frames;
3264  	snd_pcm_sframes_t result;
3265  
3266  	if (get_user(frames, _frames))
3267  		return -EFAULT;
3268  	if (put_user(0, _frames))
3269  		return -EFAULT;
3270  	result = snd_pcm_rewind(substream, frames);
3271  	if (put_user(result, _frames))
3272  		return -EFAULT;
3273  	return result < 0 ? result : 0;
3274  }
3275  
3276  static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
3277  				 snd_pcm_uframes_t __user *_frames)
3278  {
3279  	snd_pcm_uframes_t frames;
3280  	snd_pcm_sframes_t result;
3281  
3282  	if (get_user(frames, _frames))
3283  		return -EFAULT;
3284  	if (put_user(0, _frames))
3285  		return -EFAULT;
3286  	result = snd_pcm_forward(substream, frames);
3287  	if (put_user(result, _frames))
3288  		return -EFAULT;
3289  	return result < 0 ? result : 0;
3290  }
3291  
3292  static int snd_pcm_common_ioctl(struct file *file,
3293  				 struct snd_pcm_substream *substream,
3294  				 unsigned int cmd, void __user *arg)
3295  {
3296  	struct snd_pcm_file *pcm_file = file->private_data;
3297  	int res;
3298  
3299  	if (PCM_RUNTIME_CHECK(substream))
3300  		return -ENXIO;
3301  
3302  	if (substream->runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3303  		return -EBADFD;
3304  
3305  	res = snd_power_wait(substream->pcm->card);
3306  	if (res < 0)
3307  		return res;
3308  
3309  	switch (cmd) {
3310  	case SNDRV_PCM_IOCTL_PVERSION:
3311  		return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
3312  	case SNDRV_PCM_IOCTL_INFO:
3313  		return snd_pcm_info_user(substream, arg);
3314  	case SNDRV_PCM_IOCTL_TSTAMP:	/* just for compatibility */
3315  		return 0;
3316  	case SNDRV_PCM_IOCTL_TTSTAMP:
3317  		return snd_pcm_tstamp(substream, arg);
3318  	case SNDRV_PCM_IOCTL_USER_PVERSION:
3319  		if (get_user(pcm_file->user_pversion,
3320  			     (unsigned int __user *)arg))
3321  			return -EFAULT;
3322  		return 0;
3323  	case SNDRV_PCM_IOCTL_HW_REFINE:
3324  		return snd_pcm_hw_refine_user(substream, arg);
3325  	case SNDRV_PCM_IOCTL_HW_PARAMS:
3326  		return snd_pcm_hw_params_user(substream, arg);
3327  	case SNDRV_PCM_IOCTL_HW_FREE:
3328  		return snd_pcm_hw_free(substream);
3329  	case SNDRV_PCM_IOCTL_SW_PARAMS:
3330  		return snd_pcm_sw_params_user(substream, arg);
3331  	case SNDRV_PCM_IOCTL_STATUS32:
3332  		return snd_pcm_status_user32(substream, arg, false);
3333  	case SNDRV_PCM_IOCTL_STATUS_EXT32:
3334  		return snd_pcm_status_user32(substream, arg, true);
3335  	case SNDRV_PCM_IOCTL_STATUS64:
3336  		return snd_pcm_status_user64(substream, arg, false);
3337  	case SNDRV_PCM_IOCTL_STATUS_EXT64:
3338  		return snd_pcm_status_user64(substream, arg, true);
3339  	case SNDRV_PCM_IOCTL_CHANNEL_INFO:
3340  		return snd_pcm_channel_info_user(substream, arg);
3341  	case SNDRV_PCM_IOCTL_PREPARE:
3342  		return snd_pcm_prepare(substream, file);
3343  	case SNDRV_PCM_IOCTL_RESET:
3344  		return snd_pcm_reset(substream);
3345  	case SNDRV_PCM_IOCTL_START:
3346  		return snd_pcm_start_lock_irq(substream);
3347  	case SNDRV_PCM_IOCTL_LINK:
3348  		return snd_pcm_link(substream, (int)(unsigned long) arg);
3349  	case SNDRV_PCM_IOCTL_UNLINK:
3350  		return snd_pcm_unlink(substream);
3351  	case SNDRV_PCM_IOCTL_RESUME:
3352  		return snd_pcm_resume(substream);
3353  	case SNDRV_PCM_IOCTL_XRUN:
3354  		return snd_pcm_xrun(substream);
3355  	case SNDRV_PCM_IOCTL_HWSYNC:
3356  		return snd_pcm_hwsync(substream);
3357  	case SNDRV_PCM_IOCTL_DELAY:
3358  	{
3359  		snd_pcm_sframes_t delay = 0;
3360  		snd_pcm_sframes_t __user *res = arg;
3361  		int err;
3362  
3363  		err = snd_pcm_delay(substream, &delay);
3364  		if (err)
3365  			return err;
3366  		if (put_user(delay, res))
3367  			return -EFAULT;
3368  		return 0;
3369  	}
3370  	case __SNDRV_PCM_IOCTL_SYNC_PTR32:
3371  		return snd_pcm_ioctl_sync_ptr_compat(substream, arg);
3372  	case __SNDRV_PCM_IOCTL_SYNC_PTR64:
3373  		return snd_pcm_sync_ptr(substream, arg);
3374  #ifdef CONFIG_SND_SUPPORT_OLD_API
3375  	case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
3376  		return snd_pcm_hw_refine_old_user(substream, arg);
3377  	case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
3378  		return snd_pcm_hw_params_old_user(substream, arg);
3379  #endif
3380  	case SNDRV_PCM_IOCTL_DRAIN:
3381  		return snd_pcm_drain(substream, file);
3382  	case SNDRV_PCM_IOCTL_DROP:
3383  		return snd_pcm_drop(substream);
3384  	case SNDRV_PCM_IOCTL_PAUSE:
3385  		return snd_pcm_pause_lock_irq(substream, (unsigned long)arg);
3386  	case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
3387  	case SNDRV_PCM_IOCTL_READI_FRAMES:
3388  		return snd_pcm_xferi_frames_ioctl(substream, arg);
3389  	case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
3390  	case SNDRV_PCM_IOCTL_READN_FRAMES:
3391  		return snd_pcm_xfern_frames_ioctl(substream, arg);
3392  	case SNDRV_PCM_IOCTL_REWIND:
3393  		return snd_pcm_rewind_ioctl(substream, arg);
3394  	case SNDRV_PCM_IOCTL_FORWARD:
3395  		return snd_pcm_forward_ioctl(substream, arg);
3396  	}
3397  	pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
3398  	return -ENOTTY;
3399  }
3400  
3401  static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
3402  			  unsigned long arg)
3403  {
3404  	struct snd_pcm_file *pcm_file;
3405  
3406  	pcm_file = file->private_data;
3407  
3408  	if (((cmd >> 8) & 0xff) != 'A')
3409  		return -ENOTTY;
3410  
3411  	return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
3412  				     (void __user *)arg);
3413  }
3414  
3415  /**
3416   * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
3417   * @substream: PCM substream
3418   * @cmd: IOCTL cmd
3419   * @arg: IOCTL argument
3420   *
3421   * This function is provided primarily for the OSS layer and USB gadget
3422   * drivers, and it allows only a limited set of ioctls (hw_params, sw_params,
3423   * prepare, start, drain, drop, forward, delay).
3424   *
3425   * Return: zero if successful, or a negative error code
3426   */
3427  int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3428  			 unsigned int cmd, void *arg)
3429  {
3430  	snd_pcm_uframes_t *frames = arg;
3431  	snd_pcm_sframes_t result;
3432  
3433  	if (substream->runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3434  		return -EBADFD;
3435  
3436  	switch (cmd) {
3437  	case SNDRV_PCM_IOCTL_FORWARD:
3438  	{
3439  		/* provided only for OSS; capture-only and no value returned */
3440  		if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
3441  			return -EINVAL;
3442  		result = snd_pcm_forward(substream, *frames);
3443  		return result < 0 ? result : 0;
3444  	}
3445  	case SNDRV_PCM_IOCTL_HW_PARAMS:
3446  		return snd_pcm_hw_params(substream, arg);
3447  	case SNDRV_PCM_IOCTL_SW_PARAMS:
3448  		return snd_pcm_sw_params(substream, arg);
3449  	case SNDRV_PCM_IOCTL_PREPARE:
3450  		return snd_pcm_prepare(substream, NULL);
3451  	case SNDRV_PCM_IOCTL_START:
3452  		return snd_pcm_start_lock_irq(substream);
3453  	case SNDRV_PCM_IOCTL_DRAIN:
3454  		return snd_pcm_drain(substream, NULL);
3455  	case SNDRV_PCM_IOCTL_DROP:
3456  		return snd_pcm_drop(substream);
3457  	case SNDRV_PCM_IOCTL_DELAY:
3458  		return snd_pcm_delay(substream, frames);
3459  	default:
3460  		return -EINVAL;
3461  	}
3462  }
3463  EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
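
/*
 * Usage sketch (hypothetical caller, not part of this file): an in-kernel
 * user such as the OSS emulation layer or a USB gadget driver typically
 * configures and starts a substream roughly as below; the hw_params
 * filling is elided here.
 *
 *	struct snd_pcm_hw_params *params = kzalloc(sizeof(*params), GFP_KERNEL);
 *	int err;
 *
 *	if (!params)
 *		return -ENOMEM;
 *	... fill and constrain *params for the desired rate/format/channels ...
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
 *	if (!err)
 *		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
 *	if (!err)
 *		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL);
 *	kfree(params);
 */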
3464  
3465  static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3466  			    loff_t * offset)
3467  {
3468  	struct snd_pcm_file *pcm_file;
3469  	struct snd_pcm_substream *substream;
3470  	struct snd_pcm_runtime *runtime;
3471  	snd_pcm_sframes_t result;
3472  
3473  	pcm_file = file->private_data;
3474  	substream = pcm_file->substream;
3475  	if (PCM_RUNTIME_CHECK(substream))
3476  		return -ENXIO;
3477  	runtime = substream->runtime;
3478  	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
3479  	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3480  		return -EBADFD;
3481  	if (!frame_aligned(runtime, count))
3482  		return -EINVAL;
3483  	count = bytes_to_frames(runtime, count);
3484  	result = snd_pcm_lib_read(substream, buf, count);
3485  	if (result > 0)
3486  		result = frames_to_bytes(runtime, result);
3487  	return result;
3488  }
3489  
3490  static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3491  			     size_t count, loff_t * offset)
3492  {
3493  	struct snd_pcm_file *pcm_file;
3494  	struct snd_pcm_substream *substream;
3495  	struct snd_pcm_runtime *runtime;
3496  	snd_pcm_sframes_t result;
3497  
3498  	pcm_file = file->private_data;
3499  	substream = pcm_file->substream;
3500  	if (PCM_RUNTIME_CHECK(substream))
3501  		return -ENXIO;
3502  	runtime = substream->runtime;
3503  	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
3504  	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3505  		return -EBADFD;
3506  	if (!frame_aligned(runtime, count))
3507  		return -EINVAL;
3508  	count = bytes_to_frames(runtime, count);
3509  	result = snd_pcm_lib_write(substream, buf, count);
3510  	if (result > 0)
3511  		result = frames_to_bytes(runtime, result);
3512  	return result;
3513  }
3514  
3515  static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3516  {
3517  	struct snd_pcm_file *pcm_file;
3518  	struct snd_pcm_substream *substream;
3519  	struct snd_pcm_runtime *runtime;
3520  	snd_pcm_sframes_t result;
3521  	unsigned long i;
3522  	void __user **bufs __free(kfree) = NULL;
3523  	snd_pcm_uframes_t frames;
3524  	const struct iovec *iov = iter_iov(to);
3525  
3526  	pcm_file = iocb->ki_filp->private_data;
3527  	substream = pcm_file->substream;
3528  	if (PCM_RUNTIME_CHECK(substream))
3529  		return -ENXIO;
3530  	runtime = substream->runtime;
3531  	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
3532  	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3533  		return -EBADFD;
3534  	if (!user_backed_iter(to))
3535  		return -EINVAL;
3536  	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3537  		return -EINVAL;
3538  	if (!frame_aligned(runtime, iov->iov_len))
3539  		return -EINVAL;
3540  	frames = bytes_to_samples(runtime, iov->iov_len);
3541  	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
3542  	if (bufs == NULL)
3543  		return -ENOMEM;
3544  	for (i = 0; i < to->nr_segs; ++i) {
3545  		bufs[i] = iov->iov_base;
3546  		iov++;
3547  	}
3548  	result = snd_pcm_lib_readv(substream, bufs, frames);
3549  	if (result > 0)
3550  		result = frames_to_bytes(runtime, result);
3551  	return result;
3552  }
3553  
3554  static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3555  {
3556  	struct snd_pcm_file *pcm_file;
3557  	struct snd_pcm_substream *substream;
3558  	struct snd_pcm_runtime *runtime;
3559  	snd_pcm_sframes_t result;
3560  	unsigned long i;
3561  	void __user **bufs __free(kfree) = NULL;
3562  	snd_pcm_uframes_t frames;
3563  	const struct iovec *iov = iter_iov(from);
3564  
3565  	pcm_file = iocb->ki_filp->private_data;
3566  	substream = pcm_file->substream;
3567  	if (PCM_RUNTIME_CHECK(substream))
3568  		return -ENXIO;
3569  	runtime = substream->runtime;
3570  	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
3571  	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3572  		return -EBADFD;
3573  	if (!user_backed_iter(from))
3574  		return -EINVAL;
3575  	if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3576  	    !frame_aligned(runtime, iov->iov_len))
3577  		return -EINVAL;
3578  	frames = bytes_to_samples(runtime, iov->iov_len);
3579  	bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
3580  	if (bufs == NULL)
3581  		return -ENOMEM;
3582  	for (i = 0; i < from->nr_segs; ++i) {
3583  		bufs[i] = iov->iov_base;
3584  		iov++;
3585  	}
3586  	result = snd_pcm_lib_writev(substream, bufs, frames);
3587  	if (result > 0)
3588  		result = frames_to_bytes(runtime, result);
3589  	return result;
3590  }
3591  
3592  static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
3593  {
3594  	struct snd_pcm_file *pcm_file;
3595  	struct snd_pcm_substream *substream;
3596  	struct snd_pcm_runtime *runtime;
3597  	__poll_t mask, ok;
3598  	snd_pcm_uframes_t avail;
3599  
3600  	pcm_file = file->private_data;
3601  
3602  	substream = pcm_file->substream;
3603  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3604  		ok = EPOLLOUT | EPOLLWRNORM;
3605  	else
3606  		ok = EPOLLIN | EPOLLRDNORM;
3607  	if (PCM_RUNTIME_CHECK(substream))
3608  		return ok | EPOLLERR;
3609  
3610  	runtime = substream->runtime;
3611  	if (runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3612  		return ok | EPOLLERR;
3613  
3614  	poll_wait(file, &runtime->sleep, wait);
3615  
3616  	mask = 0;
3617  	guard(pcm_stream_lock_irq)(substream);
3618  	avail = snd_pcm_avail(substream);
3619  	switch (runtime->state) {
3620  	case SNDRV_PCM_STATE_RUNNING:
3621  	case SNDRV_PCM_STATE_PREPARED:
3622  	case SNDRV_PCM_STATE_PAUSED:
3623  		if (avail >= runtime->control->avail_min)
3624  			mask = ok;
3625  		break;
3626  	case SNDRV_PCM_STATE_DRAINING:
3627  		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
3628  			mask = ok;
3629  			if (!avail)
3630  				mask |= EPOLLERR;
3631  		}
3632  		break;
3633  	default:
3634  		mask = ok | EPOLLERR;
3635  		break;
3636  	}
3637  	return mask;
3638  }
3639  
3640  /*
3641   * mmap support
3642   */
3643  
3644  /*
3645   * Only on coherent architectures can we mmap the status and the control records
3646   * for efficient data transfer.  On others, we have to use the HWSYNC ioctl...
3647   */
3648  #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3649  /*
3650   * mmap status record
3651   */
3652  static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3653  {
3654  	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3655  	struct snd_pcm_runtime *runtime;
3656  
3657  	if (substream == NULL)
3658  		return VM_FAULT_SIGBUS;
3659  	runtime = substream->runtime;
3660  	vmf->page = virt_to_page(runtime->status);
3661  	get_page(vmf->page);
3662  	return 0;
3663  }
3664  
3665  static const struct vm_operations_struct snd_pcm_vm_ops_status =
3666  {
3667  	.fault =	snd_pcm_mmap_status_fault,
3668  };
3669  
3670  static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3671  			       struct vm_area_struct *area)
3672  {
3673  	long size;
3674  	if (!(area->vm_flags & VM_READ))
3675  		return -EINVAL;
3676  	size = area->vm_end - area->vm_start;
3677  	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3678  		return -EINVAL;
3679  	area->vm_ops = &snd_pcm_vm_ops_status;
3680  	area->vm_private_data = substream;
3681  	vm_flags_mod(area, VM_DONTEXPAND | VM_DONTDUMP,
3682  		     VM_WRITE | VM_MAYWRITE);
3683  
3684  	return 0;
3685  }
3686  
3687  /*
3688   * mmap control record
3689   */
3690  static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3691  {
3692  	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3693  	struct snd_pcm_runtime *runtime;
3694  
3695  	if (substream == NULL)
3696  		return VM_FAULT_SIGBUS;
3697  	runtime = substream->runtime;
3698  	vmf->page = virt_to_page(runtime->control);
3699  	get_page(vmf->page);
3700  	return 0;
3701  }
3702  
3703  static const struct vm_operations_struct snd_pcm_vm_ops_control =
3704  {
3705  	.fault =	snd_pcm_mmap_control_fault,
3706  };
3707  
3708  static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3709  				struct vm_area_struct *area)
3710  {
3711  	long size;
3712  	if (!(area->vm_flags & VM_READ))
3713  		return -EINVAL;
3714  	size = area->vm_end - area->vm_start;
3715  	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3716  		return -EINVAL;
3717  	area->vm_ops = &snd_pcm_vm_ops_control;
3718  	area->vm_private_data = substream;
3719  	vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
3720  	return 0;
3721  }
3722  
3723  static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3724  {
3725  	/* If the driver requires explicit sync (typically for non-coherent
3726  	 * pages), we have to disable the mmap of status and control data
3727  	 * to enforce their control via the SYNC_PTR ioctl.
3728  	 */
3729  	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_EXPLICIT_SYNC)
3730  		return false;
3731  	/* See pcm_control_mmap_allowed() below.
3732  	 * Since older alsa-lib requires both status and control mmaps to be
3733  	 * coupled, we have to disable the status mmap for old alsa-lib, too.
3734  	 */
3735  	if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3736  	    (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3737  		return false;
3738  	return true;
3739  }
3740  
3741  static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3742  {
3743  	if (pcm_file->no_compat_mmap)
3744  		return false;
3745  	/* see above */
3746  	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_EXPLICIT_SYNC)
3747  		return false;
3748  	/* Disallow the control mmap when the SYNC_APPLPTR flag is set;
3749  	 * this forces user space to fall back to snd_pcm_sync_ptr(),
3750  	 * which effectively assures the manual update of appl_ptr.
3751  	 */
3752  	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3753  		return false;
3754  	return true;
3755  }
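
/*
 * User-space fallback sketch (illustration only; pcm_fd, new_appl_ptr and
 * avail_min are hypothetical): when the control mmap is refused above, the
 * application is expected to push appl_ptr/avail_min through the SYNC_PTR
 * ioctl instead of writing the mmapped control record directly:
 *
 *	struct snd_pcm_sync_ptr sp = {0};
 *
 *	sp.c.control.appl_ptr = new_appl_ptr;
 *	sp.c.control.avail_min = avail_min;
 *	sp.flags = 0;		// 0 = apply appl_ptr and avail_min to the kernel
 *	ioctl(pcm_fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp);
 */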
3756  
3757  #else /* ! coherent mmap */
3758  /*
3759   * don't support mmap for status and control records.
3760   */
3761  #define pcm_status_mmap_allowed(pcm_file)	false
3762  #define pcm_control_mmap_allowed(pcm_file)	false
3763  
3764  static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3765  			       struct vm_area_struct *area)
3766  {
3767  	return -ENXIO;
3768  }
3769  static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3770  				struct vm_area_struct *area)
3771  {
3772  	return -ENXIO;
3773  }
3774  #endif /* coherent mmap */
3775  
3776  /*
3777   * fault callback for mmapping a RAM page
3778   */
3779  static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3780  {
3781  	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3782  	struct snd_pcm_runtime *runtime;
3783  	unsigned long offset;
3784  	struct page * page;
3785  	size_t dma_bytes;
3786  
3787  	if (substream == NULL)
3788  		return VM_FAULT_SIGBUS;
3789  	runtime = substream->runtime;
3790  	offset = vmf->pgoff << PAGE_SHIFT;
3791  	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3792  	if (offset > dma_bytes - PAGE_SIZE)
3793  		return VM_FAULT_SIGBUS;
3794  	if (substream->ops->page)
3795  		page = substream->ops->page(substream, offset);
3796  	else if (!snd_pcm_get_dma_buf(substream))
3797  		page = virt_to_page(runtime->dma_area + offset);
3798  	else
3799  		page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
3800  	if (!page)
3801  		return VM_FAULT_SIGBUS;
3802  	get_page(page);
3803  	vmf->page = page;
3804  	return 0;
3805  }
3806  
3807  static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3808  	.open =		snd_pcm_mmap_data_open,
3809  	.close =	snd_pcm_mmap_data_close,
3810  };
3811  
3812  static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3813  	.open =		snd_pcm_mmap_data_open,
3814  	.close =	snd_pcm_mmap_data_close,
3815  	.fault =	snd_pcm_mmap_data_fault,
3816  };
3817  
3818  /*
3819   * mmap the DMA buffer on RAM
3820   */
3821  
3822  /**
3823   * snd_pcm_lib_default_mmap - Default PCM data mmap function
3824   * @substream: PCM substream
3825   * @area: VMA
3826   *
3827   * This is the default mmap handler for PCM data.  When mmap pcm_ops is NULL,
3828   * this function is invoked implicitly.
3829   *
3830   * Return: zero if successful, or a negative error code
3831   */
3832  int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3833  			     struct vm_area_struct *area)
3834  {
3835  	vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
3836  	if (!substream->ops->page &&
3837  	    !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
3838  		return 0;
3839  	/* mmap with fault handler */
3840  	area->vm_ops = &snd_pcm_vm_ops_data_fault;
3841  	return 0;
3842  }
3843  EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
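
/*
 * Usage sketch (hypothetical driver code, for illustration): a driver that
 * only needs a special page protection can still delegate the actual
 * mapping to this helper from its own mmap callback:
 *
 *	static int foo_pcm_mmap(struct snd_pcm_substream *substream,
 *				struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return snd_pcm_lib_default_mmap(substream, vma);
 *	}
 */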
3844  
3845  /*
3846   * mmap the DMA buffer on I/O memory area
3847   */
3848  #if SNDRV_PCM_INFO_MMAP_IOMEM
3849  /**
3850   * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3851   * @substream: PCM substream
3852   * @area: VMA
3853   *
3854   * When your hardware uses iomapped pages as the hardware buffer and you
3855   * want to mmap them, pass this function as the mmap pcm_ops.  Note that
3856   * this is supposed to work only on a limited set of architectures.
3857   *
3858   * Return: zero if successful, or a negative error code
3859   */
3860  int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3861  			   struct vm_area_struct *area)
3862  {
3863  	struct snd_pcm_runtime *runtime = substream->runtime;
3864  
3865  	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3866  	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3867  }
3868  EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
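
/*
 * Usage sketch (hypothetical driver code, for illustration): a card whose
 * ring buffer lives in device I/O memory would advertise
 * SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_IOMEM in its snd_pcm_hardware
 * info field and hook this helper directly as its mmap callback:
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open = foo_pcm_open,
 *		.hw_params = foo_pcm_hw_params,
 *		.pointer = foo_pcm_pointer,
 *		.mmap = snd_pcm_lib_mmap_iomem,
 *	};
 */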
3869  #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
3870  
3871  /*
3872   * mmap DMA buffer
3873   */
3874  int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3875  		      struct vm_area_struct *area)
3876  {
3877  	struct snd_pcm_runtime *runtime;
3878  	long size;
3879  	unsigned long offset;
3880  	size_t dma_bytes;
3881  	int err;
3882  
3883  	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3884  		if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3885  			return -EINVAL;
3886  	} else {
3887  		if (!(area->vm_flags & VM_READ))
3888  			return -EINVAL;
3889  	}
3890  	runtime = substream->runtime;
3891  	if (runtime->state == SNDRV_PCM_STATE_OPEN)
3892  		return -EBADFD;
3893  	if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3894  		return -ENXIO;
3895  	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3896  	    runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3897  		return -EINVAL;
3898  	size = area->vm_end - area->vm_start;
3899  	offset = area->vm_pgoff << PAGE_SHIFT;
3900  	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3901  	if ((size_t)size > dma_bytes)
3902  		return -EINVAL;
3903  	if (offset > dma_bytes - size)
3904  		return -EINVAL;
3905  
3906  	area->vm_ops = &snd_pcm_vm_ops_data;
3907  	area->vm_private_data = substream;
3908  	if (substream->ops->mmap)
3909  		err = substream->ops->mmap(substream, area);
3910  	else
3911  		err = snd_pcm_lib_default_mmap(substream, area);
3912  	if (!err)
3913  		atomic_inc(&substream->mmap_count);
3914  	return err;
3915  }
3916  EXPORT_SYMBOL(snd_pcm_mmap_data);
3917  
3918  static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3919  {
3920  	struct snd_pcm_file * pcm_file;
3921  	struct snd_pcm_substream *substream;
3922  	unsigned long offset;
3923  
3924  	pcm_file = file->private_data;
3925  	substream = pcm_file->substream;
3926  	if (PCM_RUNTIME_CHECK(substream))
3927  		return -ENXIO;
3928  	if (substream->runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3929  		return -EBADFD;
3930  
3931  	offset = area->vm_pgoff << PAGE_SHIFT;
3932  	switch (offset) {
3933  	case SNDRV_PCM_MMAP_OFFSET_STATUS_OLD:
3934  		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
3935  			return -ENXIO;
3936  		fallthrough;
3937  	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
3938  		if (!pcm_status_mmap_allowed(pcm_file))
3939  			return -ENXIO;
3940  		return snd_pcm_mmap_status(substream, file, area);
3941  	case SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD:
3942  		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
3943  			return -ENXIO;
3944  		fallthrough;
3945  	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
3946  		if (!pcm_control_mmap_allowed(pcm_file))
3947  			return -ENXIO;
3948  		return snd_pcm_mmap_control(substream, file, area);
3949  	default:
3950  		return snd_pcm_mmap_data(substream, file, area);
3951  	}
3952  	return 0;
3953  }
3954  
3955  static int snd_pcm_fasync(int fd, struct file * file, int on)
3956  {
3957  	struct snd_pcm_file * pcm_file;
3958  	struct snd_pcm_substream *substream;
3959  	struct snd_pcm_runtime *runtime;
3960  
3961  	pcm_file = file->private_data;
3962  	substream = pcm_file->substream;
3963  	if (PCM_RUNTIME_CHECK(substream))
3964  		return -ENXIO;
3965  	runtime = substream->runtime;
3966  	if (runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3967  		return -EBADFD;
3968  	return snd_fasync_helper(fd, file, on, &runtime->fasync);
3969  }
3970  
3971  /*
3972   * ioctl32 compat
3973   */
3974  #ifdef CONFIG_COMPAT
3975  #include "pcm_compat.c"
3976  #else
3977  #define snd_pcm_ioctl_compat	NULL
3978  #endif
3979  
3980  /*
3981   *  Helpers kept only for binary compatibility (to be removed eventually)
3982   */
3983  
3984  #ifdef CONFIG_SND_SUPPORT_OLD_API
3985  #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
3986  #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
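
/*
 * Illustration (based on the assumed old ABI layout): the three mask
 * parameters (ACCESS, FORMAT, SUBFORMAT) kept indices 0..2, while the
 * interval parameters that used to follow at index 3 now start at index 8,
 * so the rmask/cmask bits above index 2 are shifted up by 5.  For instance,
 * __OLD_TO_NEW_MASK(1 << 3) yields 1 << 8, the new SAMPLE_BITS bit.
 */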
3987  
3988  static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
3989  					       struct snd_pcm_hw_params_old *oparams)
3990  {
3991  	unsigned int i;
3992  
3993  	memset(params, 0, sizeof(*params));
3994  	params->flags = oparams->flags;
3995  	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
3996  		params->masks[i].bits[0] = oparams->masks[i];
3997  	memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
3998  	params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
3999  	params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
4000  	params->info = oparams->info;
4001  	params->msbits = oparams->msbits;
4002  	params->rate_num = oparams->rate_num;
4003  	params->rate_den = oparams->rate_den;
4004  	params->fifo_size = oparams->fifo_size;
4005  }
4006  
4007  static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
4008  					     struct snd_pcm_hw_params *params)
4009  {
4010  	unsigned int i;
4011  
4012  	memset(oparams, 0, sizeof(*oparams));
4013  	oparams->flags = params->flags;
4014  	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
4015  		oparams->masks[i] = params->masks[i].bits[0];
4016  	memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
4017  	oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
4018  	oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
4019  	oparams->info = params->info;
4020  	oparams->msbits = params->msbits;
4021  	oparams->rate_num = params->rate_num;
4022  	oparams->rate_den = params->rate_den;
4023  	oparams->fifo_size = params->fifo_size;
4024  }
4025  
4026  static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
4027  				      struct snd_pcm_hw_params_old __user * _oparams)
4028  {
4029  	struct snd_pcm_hw_params *params __free(kfree) = NULL;
4030  	struct snd_pcm_hw_params_old *oparams __free(kfree) = NULL;
4031  	int err;
4032  
4033  	params = kmalloc(sizeof(*params), GFP_KERNEL);
4034  	if (!params)
4035  		return -ENOMEM;
4036  
4037  	oparams = memdup_user(_oparams, sizeof(*oparams));
4038  	if (IS_ERR(oparams))
4039  		return PTR_ERR(oparams);
4040  	snd_pcm_hw_convert_from_old_params(params, oparams);
4041  	err = snd_pcm_hw_refine(substream, params);
4042  	if (err < 0)
4043  		return err;
4044  
4045  	err = fixup_unreferenced_params(substream, params);
4046  	if (err < 0)
4047  		return err;
4048  
4049  	snd_pcm_hw_convert_to_old_params(oparams, params);
4050  	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
4051  		return -EFAULT;
4052  	return 0;
4053  }
4054  
4055  static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
4056  				      struct snd_pcm_hw_params_old __user * _oparams)
4057  {
4058  	struct snd_pcm_hw_params *params __free(kfree) = NULL;
4059  	struct snd_pcm_hw_params_old *oparams __free(kfree) = NULL;
4060  	int err;
4061  
4062  	params = kmalloc(sizeof(*params), GFP_KERNEL);
4063  	if (!params)
4064  		return -ENOMEM;
4065  
4066  	oparams = memdup_user(_oparams, sizeof(*oparams));
4067  	if (IS_ERR(oparams))
4068  		return PTR_ERR(oparams);
4069  
4070  	snd_pcm_hw_convert_from_old_params(params, oparams);
4071  	err = snd_pcm_hw_params(substream, params);
4072  	if (err < 0)
4073  		return err;
4074  
4075  	snd_pcm_hw_convert_to_old_params(oparams, params);
4076  	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
4077  		return -EFAULT;
4078  	return 0;
4079  }
4080  #endif /* CONFIG_SND_SUPPORT_OLD_API */
4081  
4082  #ifndef CONFIG_MMU
4083  static unsigned long snd_pcm_get_unmapped_area(struct file *file,
4084  					       unsigned long addr,
4085  					       unsigned long len,
4086  					       unsigned long pgoff,
4087  					       unsigned long flags)
4088  {
4089  	struct snd_pcm_file *pcm_file = file->private_data;
4090  	struct snd_pcm_substream *substream = pcm_file->substream;
4091  	struct snd_pcm_runtime *runtime = substream->runtime;
4092  	unsigned long offset = pgoff << PAGE_SHIFT;
4093  
4094  	switch (offset) {
4095  	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
4096  		return (unsigned long)runtime->status;
4097  	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
4098  		return (unsigned long)runtime->control;
4099  	default:
4100  		return (unsigned long)runtime->dma_area + offset;
4101  	}
4102  }
4103  #else
4104  # define snd_pcm_get_unmapped_area NULL
4105  #endif
4106  
4107  /*
4108   *  Register section
4109   */
4110  
4111  const struct file_operations snd_pcm_f_ops[2] = {
4112  	{
4113  		.owner =		THIS_MODULE,
4114  		.write =		snd_pcm_write,
4115  		.write_iter =		snd_pcm_writev,
4116  		.open =			snd_pcm_playback_open,
4117  		.release =		snd_pcm_release,
4118  		.poll =			snd_pcm_poll,
4119  		.unlocked_ioctl =	snd_pcm_ioctl,
4120  		.compat_ioctl = 	snd_pcm_ioctl_compat,
4121  		.mmap =			snd_pcm_mmap,
4122  		.fasync =		snd_pcm_fasync,
4123  		.get_unmapped_area =	snd_pcm_get_unmapped_area,
4124  	},
4125  	{
4126  		.owner =		THIS_MODULE,
4127  		.read =			snd_pcm_read,
4128  		.read_iter =		snd_pcm_readv,
4129  		.open =			snd_pcm_capture_open,
4130  		.release =		snd_pcm_release,
4131  		.poll =			snd_pcm_poll,
4132  		.unlocked_ioctl =	snd_pcm_ioctl,
4133  		.compat_ioctl = 	snd_pcm_ioctl_compat,
4134  		.mmap =			snd_pcm_mmap,
4135  		.fasync =		snd_pcm_fasync,
4136  		.get_unmapped_area =	snd_pcm_get_unmapped_area,
4137  	}
4138  };
4139