// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
#endif

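/*
 * Final kref release callback for a buffer map: frees the buffer pages
 * (either the single DMA-coherent block or the individually allocated,
 * reserved pages), the page list, and the map itself.
 */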
static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		if (bm->dma_dir != DMA_NONE) {
			/*
			 * DMA buffer was allocated as a single block.
			 * Address is in page_list[0].
			 */
			buf = &bm->page_list[0];
			dma_free_coherent(bm->dma_hw_dev,
					  PAGE_SIZE * bm->n_pages,
					  buf->virt_addr, buf->dma_addr);
		} else {
			for (i = 0; i < bm->n_pages; i++) {
				buf = &bm->page_list[i];
				ClearPageReserved(virt_to_page(buf->virt_addr));
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}

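/*
 * Release the subdevice's preallocated acquisition buffer: unmap the
 * vmap()ed kernel mapping (non-DMA case) and drop the subdevice's
 * reference on the underlying buffer map.
 */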
static void __comedi_buf_free(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	if (async->prealloc_buf) {
		if (s->async_dma_dir == DMA_NONE)
			vunmap(async->prealloc_buf);
		async->prealloc_buf = NULL;
		async->prealloc_bufsz = 0;
	}

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	async->buf_map = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	comedi_buf_map_put(bm);
}

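/*
 * Allocate a reference-counted buffer map of @n_pages pages, either as a
 * single DMA-coherent block (when @dma_dir is not DMA_NONE) or as
 * individually allocated, zeroed, reserved pages.
 */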
static struct comedi_buf_map *
comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
		     unsigned int n_pages)
{
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned int i;

	bm = kzalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm)
		return NULL;

	kref_init(&bm->refcount);
	bm->dma_dir = dma_dir;
	if (bm->dma_dir != DMA_NONE) {
		/* Need ref to hardware device to free buffer later. */
		bm->dma_hw_dev = get_device(dev->hw_dev);
	}

	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (!bm->page_list)
		goto err;

	if (bm->dma_dir != DMA_NONE) {
		void *virt_addr;
		dma_addr_t dma_addr;

		/*
		 * Currently, the DMA buffer needs to be allocated as a
		 * single block so that it can be mmap()'ed.
		 */
		virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
					       PAGE_SIZE * n_pages, &dma_addr,
					       GFP_KERNEL);
		if (!virt_addr)
			goto err;

		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr = virt_addr + (i << PAGE_SHIFT);
			buf->dma_addr = dma_addr + (i << PAGE_SHIFT);
		}

		bm->n_pages = i;
	} else {
		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
			if (!buf->virt_addr)
				break;

			SetPageReserved(virt_to_page(buf->virt_addr));
		}

		bm->n_pages = i;
		if (i < n_pages)
			goto err;
	}

	return bm;

err:
	comedi_buf_map_put(bm);
	return NULL;
}

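/*
 * Allocate a new buffer map for the subdevice and expose it as a
 * contiguous kernel virtual mapping in async->prealloc_buf.  In the
 * non-DMA case the individual pages are vmap()ed; a DMA-coherent buffer
 * is already contiguous, so its first page address is used directly.
 */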
static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned int n_pages)
{
	struct comedi_async *async = s->async;
	struct page **pages = NULL;
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned long flags;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = comedi_buf_map_alloc(dev, s->async_dma_dir, n_pages);
	if (!bm)
		return;

	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	if (bm->dma_dir != DMA_NONE) {
		/*
		 * DMA buffer was allocated as a single block.
		 * Address is in page_list[0].
		 */
		buf = &bm->page_list[0];
		async->prealloc_buf = buf->virt_addr;
	} else {
		pages = vmalloc(sizeof(struct page *) * n_pages);
		if (!pages)
			return;

		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			pages[i] = virt_to_page(buf->virt_addr);
		}

		/* vmap the pages to prealloc_buf */
		async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
					   COMEDI_PAGE_PROTECTION);

		vfree(pages);
	}
}

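/* Take an additional reference on a buffer map (a NULL @bm is allowed). */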
void comedi_buf_map_get(struct comedi_buf_map *bm)
{
	if (bm)
		kref_get(&bm->refcount);
}

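/*
 * Drop a reference on a buffer map (a NULL @bm is allowed).  Returns 1 if
 * this was the last reference and the map was released, otherwise 0.
 */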
int comedi_buf_map_put(struct comedi_buf_map *bm)
{
	if (bm)
		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
	return 1;
}

/* helper for "access" vm operation */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
			  void *buf, int len, int write)
{
	unsigned int pgoff = offset_in_page(offset);
	unsigned long pg = offset >> PAGE_SHIFT;
	int done = 0;

	while (done < len && pg < bm->n_pages) {
		int l = min_t(int, len - done, PAGE_SIZE - pgoff);
		void *b = bm->page_list[pg].virt_addr + pgoff;

		if (write)
			memcpy(b, buf, l);
		else
			memcpy(buf, b, l);
		buf += l;
		done += l;
		pg++;
		pgoff = 0;
	}
	return done;
}

/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm = NULL;
	unsigned long flags;

	if (!async)
		return NULL;

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	/* only want it if buffer pages allocated */
	if (bm && bm->n_pages)
		comedi_buf_map_get(bm);
	else
		bm = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	return bm;
}

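/*
 * The buffer is treated as mmapped if the buffer map's refcount is greater
 * than one, i.e. something besides async->buf_map holds a reference.
 */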
bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
	struct comedi_buf_map *bm = s->async->buf_map;

	return bm && (kref_read(&bm->refcount) > 1);
}

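/*
 * (Re)allocate the subdevice's acquisition buffer to @new_size bytes,
 * rounded up to a whole number of pages.  A size of 0 just frees the
 * current buffer.
 */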
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
		     unsigned long new_size)
{
	struct comedi_async *async = s->async;

	lockdep_assert_held(&dev->mutex);

	/* Round up new_size to multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_buf && async->prealloc_bufsz == new_size)
		return 0;

	/* deallocate old buffer */
	__comedi_buf_free(dev, s);

	/* allocate new buffer */
	if (new_size) {
		unsigned int n_pages = new_size >> PAGE_SHIFT;

		__comedi_buf_alloc(dev, s, n_pages);

		if (!async->prealloc_buf) {
			/* allocation failed */
			__comedi_buf_free(dev, s);
			return -ENOMEM;
		}
	}
	async->prealloc_bufsz = new_size;

	return 0;
}

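/* Reset all buffer positions, counters, and pending events for a new run. */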
void comedi_buf_reset(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	async->buf_write_alloc_count = 0;
	async->buf_write_count = 0;
	async->buf_read_alloc_count = 0;
	async->buf_read_count = 0;

	async->buf_write_ptr = 0;
	async->buf_read_ptr = 0;

	async->cur_chan = 0;
	async->scans_done = 0;
	async->scan_progress = 0;
	async->munge_chan = 0;
	async->munge_count = 0;
	async->munge_ptr = 0;

	async->events = 0;
}

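/* Number of bytes the writer may still reserve with comedi_buf_write_alloc(). */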
static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_alloc_count;
}

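/*
 * Number of bytes of buffer space not currently occupied by written,
 * unread data (i.e. ignoring any outstanding write reservation).
 */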
unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_count;
}

/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice.  The amount reserved is limited
 * by the space available.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int unalloc = comedi_buf_write_n_unalloc(s);

	if (nbytes > unalloc)
		nbytes = unalloc;

	async->buf_write_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read and updated
	 * before we write data to the write-alloc'ed buffer space
	 */
	smp_mb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);

/*
 * Munging is applied to data by the COMEDI core as it passes between
 * user and kernel space.
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
				     unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int count = 0;
	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
		async->munge_count += num_bytes;
		return num_bytes;
	}

	/* don't munge partial samples */
	num_bytes -= num_bytes % num_sample_bytes;
	while (count < num_bytes) {
		int block_size = num_bytes - count;
		unsigned int buf_end;

		buf_end = async->prealloc_bufsz - async->munge_ptr;
		if (block_size > buf_end)
			block_size = buf_end;

		s->munge(s->device, s,
			 async->prealloc_buf + async->munge_ptr,
			 block_size, async->munge_chan);

		/*
		 * ensure data is munged in buffer before the
		 * async buffer munge_count is incremented
		 */
		smp_wmb();

		async->munge_chan += block_size / num_sample_bytes;
		async->munge_chan %= async->cmd.chanlist_len;
		async->munge_count += block_size;
		async->munge_ptr += block_size;
		async->munge_ptr %= async->prealloc_bufsz;
		count += block_size;
	}

	return count;
}

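/* Number of bytes reserved by the writer but not yet marked as written. */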
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	return async->buf_write_alloc_count - async->buf_write_count;
}

/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice.  The amount of
 * space freed is limited to the amount that was reserved.  The freed space is
 * assumed to have been filled with sample data by the writer.
 *
 * If the samples in the freed space need to be "munged", do so here.  The
 * freed space becomes available for allocation by the reader.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated = comedi_buf_write_n_allocated(s);

	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);
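
/*
 * Drivers that fill the buffer directly (for example by DMA) rather than
 * through comedi_buf_write_samples() use comedi_buf_write_alloc() and
 * comedi_buf_write_free() as a pair.  An illustrative sketch of the
 * pattern, not taken from any particular driver:
 *
 *	n = comedi_buf_write_alloc(s, n);
 *	... copy n bytes of hardware data into the reserved space,
 *	    starting at s->async->buf_write_ptr and wrapping at
 *	    s->async->prealloc_bufsz ...
 *	comedi_buf_write_free(s, n);
 *	comedi_handle_events(dev, s);
 */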

/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition data
 * buffer associated with the subdevice.  The readable buffer space is that
 * which has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space.
 */
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int num_bytes;

	if (!async)
		return 0;

	num_bytes = async->munge_count - async->buf_read_count;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the buffer
	 */
	smp_rmb();

	return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice.  The amount reserved is limited to the space available.  The
 * reader can read from the reserved space and then free it.  A reader is also
 * allowed to read from the space before reserving it as long as it determines
 * the amount of readable data available, but the space needs to be marked as
 * reserved before it can be freed.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int available;

	available = async->munge_count - async->buf_read_alloc_count;
	if (nbytes > available)
		nbytes = available;

	async->buf_read_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the read-alloc'ed buffer space
	 */
	smp_rmb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

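/* Number of bytes reserved by the reader but not yet freed back to the writer. */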
static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
	return async->buf_read_alloc_count - async->buf_read_count;
}

/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice.  The
 * amount of space freed is limited to the amount that was reserved.
 *
 * The freed space becomes available for allocation by the writer.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
				  unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated;

	/*
	 * ensure data has been read out of buffer before
	 * the async read count is incremented
	 */
	smp_mb();

	allocated = comedi_buf_read_n_allocated(async);
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);

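/*
 * Copy data into the circular buffer starting at the current write
 * position, wrapping around at the end of the buffer.
 */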
static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
				 const void *data, unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int write_ptr = async->buf_write_ptr;

	while (num_bytes) {
		unsigned int block_size;

		if (write_ptr + num_bytes > async->prealloc_bufsz)
			block_size = async->prealloc_bufsz - write_ptr;
		else
			block_size = num_bytes;

		memcpy(async->prealloc_buf + write_ptr, data, block_size);

		data += block_size;
		num_bytes -= block_size;

		write_ptr = 0;
	}
}

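/*
 * Copy data out of the circular buffer starting at the current read
 * position, wrapping around at the end of the buffer.
 */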
static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
				   void *dest, unsigned int nbytes)
{
	void *src;
	struct comedi_async *async = s->async;
	unsigned int read_ptr = async->buf_read_ptr;

	while (nbytes) {
		unsigned int block_size;

		src = async->prealloc_buf + read_ptr;

		if (nbytes >= async->prealloc_bufsz - read_ptr)
			block_size = async->prealloc_bufsz - read_ptr;
		else
			block_size = nbytes;

		memcpy(dest, src, block_size);
		nbytes -= block_size;
		dest += block_size;
		read_ptr = 0;
	}
}

/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as written and update the
 * acquisition scan progress.  If there is not enough room for the specified
 * number of samples, the number of samples written is limited to the number
 * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
 * acquisition to terminate with an overrun error.  Set the %COMEDI_CB_BLOCK
 * event flag if any samples are written to cause waiting tasks to be woken
 * when the event flags are processed.
 *
 * Return: The amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
				      const void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/*
	 * Make sure there is enough room in the buffer for all the samples.
	 * If not, clamp the nsamples to the number that will fit, flag the
	 * buffer overrun and add the samples that fit.
	 */
	max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
	if (nsamples > max_samples) {
		dev_warn(s->device->class_dev, "buffer overrun\n");
		s->async->events |= COMEDI_CB_OVERFLOW;
		nsamples = max_samples;
	}

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_write_alloc(s,
					comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_to(s, data, nbytes);
	comedi_buf_write_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
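
/*
 * Illustrative sketch of typical use from an analog input interrupt
 * handler (not taken from any particular driver; AI_DATA_REG is a
 * made-up register name):
 *
 *	unsigned short sample = inw(dev->iobase + AI_DATA_REG);
 *
 *	comedi_buf_write_samples(s, &sample, 1);
 *	comedi_handle_events(dev, s);
 */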

/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as read and update the acquisition
 * scan progress.  Limit the number of samples read to the number available.
 * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
 * tasks to be woken when the event flags are processed.
 *
 * Return: The amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
				     void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/* clamp nsamples to the number of full samples available */
	max_samples = comedi_bytes_to_samples(s,
					      comedi_buf_read_n_available(s));
	if (nsamples > max_samples)
		nsamples = max_samples;

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_read_alloc(s,
				       comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_from(s, data, nbytes);
	comedi_buf_read_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
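
/*
 * Illustrative sketch of typical use from an analog output interrupt
 * handler (not taken from any particular driver; AO_DATA_REG is a
 * made-up register name):
 *
 *	unsigned short sample;
 *
 *	if (comedi_buf_read_samples(s, &sample, 1))
 *		outw(sample, dev->iobase + AO_DATA_REG);
 *	comedi_handle_events(dev, s);
 */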