Lines Matching +full:buffer +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0-or-later
11 #include <linux/dma-mapping.h>
12 #include <linux/dma-map-ops.h>
22 void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
27 unsigned int ofs, unsigned int size);
34 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
35 __GFP_NOWARN) /* no stack trace print - this call is non-critical */
39 static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size) in __snd_dma_alloc_pages() argument
43 if (WARN_ON_ONCE(!ops || !ops->alloc)) in __snd_dma_alloc_pages()
45 return ops->alloc(dmab, size); in __snd_dma_alloc_pages()
49 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
51 * @type: the DMA buffer type
54 * @size: the buffer size to allocate
55 * @dmab: buffer allocation record to store the allocated data
57 * Calls the memory-allocator function for the corresponding
58 * buffer type.
60 * Return: Zero if the buffer with the given size is allocated successfully,
64 enum dma_data_direction dir, size_t size, in snd_dma_alloc_dir_pages() argument
67 if (WARN_ON(!size)) in snd_dma_alloc_dir_pages()
68 return -ENXIO; in snd_dma_alloc_dir_pages()
70 return -ENXIO; in snd_dma_alloc_dir_pages()
72 size = PAGE_ALIGN(size); in snd_dma_alloc_dir_pages()
73 dmab->dev.type = type; in snd_dma_alloc_dir_pages()
74 dmab->dev.dev = device; in snd_dma_alloc_dir_pages()
75 dmab->dev.dir = dir; in snd_dma_alloc_dir_pages()
76 dmab->bytes = 0; in snd_dma_alloc_dir_pages()
77 dmab->addr = 0; in snd_dma_alloc_dir_pages()
78 dmab->private_data = NULL; in snd_dma_alloc_dir_pages()
79 dmab->area = __snd_dma_alloc_pages(dmab, size); in snd_dma_alloc_dir_pages()
80 if (!dmab->area) in snd_dma_alloc_dir_pages()
81 return -ENOMEM; in snd_dma_alloc_dir_pages()
82 dmab->bytes = size; in snd_dma_alloc_dir_pages()
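
A minimal usage sketch (not part of memalloc.c): a driver allocating a bidirectional device buffer with snd_dma_alloc_dir_pages() and releasing it with snd_dma_free_pages(); the 64 KiB size and the helper names are illustrative.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <sound/memalloc.h>

/* illustrative helper, not a real ALSA API */
static int example_alloc_dma_buffer(struct device *dev, struct snd_dma_buffer *dmab)
{
        int err;

        /* the size is page-aligned internally; the result ends up in dmab->bytes */
        err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
                                      DMA_BIDIRECTIONAL, 64 * 1024, dmab);
        if (err < 0)
                return err;     /* -ENXIO or -ENOMEM as documented above */

        /* dmab->area is the CPU mapping, dmab->addr the DMA address */
        memset(dmab->area, 0, dmab->bytes);
        return 0;
}

static void example_free_dma_buffer(struct snd_dma_buffer *dmab)
{
        snd_dma_free_pages(dmab);       /* pairs with the allocation above */
}
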
88 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
89 * @type: the DMA buffer type
91 * @size: the buffer size to allocate
92 * @dmab: buffer allocation record to store the allocated data
94 * Calls the memory-allocator function for the corresponding
95 * buffer type. When no space is left, this function reduces the size and
96 tries to allocate again. The size actually allocated is stored in dmab->bytes.
99 * Return: Zero if the buffer with the given size is allocated successfully,
102 int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size, in snd_dma_alloc_pages_fallback() argument
107 while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) { in snd_dma_alloc_pages_fallback()
108 if (err != -ENOMEM) in snd_dma_alloc_pages_fallback()
110 if (size <= PAGE_SIZE) in snd_dma_alloc_pages_fallback()
111 return -ENOMEM; in snd_dma_alloc_pages_fallback()
112 size >>= 1; in snd_dma_alloc_pages_fallback()
113 size = PAGE_SIZE << get_order(size); in snd_dma_alloc_pages_fallback()
115 if (!dmab->area) in snd_dma_alloc_pages_fallback()
116 return -ENOMEM; in snd_dma_alloc_pages_fallback()
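
A sketch of the fallback variant (sizes illustrative): the caller asks for 1 MiB but must read dmab->bytes afterwards, since the loop above halves the request on each -ENOMEM until it fits or drops below PAGE_SIZE.

#include <linux/device.h>
#include <sound/memalloc.h>

static int example_alloc_with_fallback(struct device *dev, struct snd_dma_buffer *dmab)
{
        int err;

        err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
                                           1024 * 1024, dmab);
        if (err < 0)
                return err;     /* not even PAGE_SIZE could be allocated */

        /* the granted size may be smaller than requested */
        dev_dbg(dev, "granted %zu bytes of the requested 1 MiB\n", dmab->bytes);
        return 0;
}
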
122 * snd_dma_free_pages - release the allocated buffer
123 * @dmab: the buffer allocation record to release
125 Releases the buffer allocated via snd_dma_alloc_pages().
131 if (ops && ops->free) in snd_dma_free_pages()
132 ops->free(dmab); in snd_dma_free_pages()
143 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
145 * @type: the DMA buffer type
147 * @size: the buffer size to allocate
149 Allocate buffer pages depending on the given type and manage them via devres.
160 enum dma_data_direction dir, size_t size) in snd_devm_alloc_dir_pages() argument
173 err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab); in snd_devm_alloc_dir_pages()
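
A sketch of the devres-managed variant in a probe() path (device and size illustrative): no explicit snd_dma_free_pages() is needed, since the buffer is released when the device is unbound.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <sound/memalloc.h>

static int example_probe_alloc(struct device *dev)
{
        struct snd_dma_buffer *dmab;

        dmab = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
                                        DMA_TO_DEVICE, 256 * 1024);
        if (!dmab)
                return -ENOMEM;

        /* use dmab->area / dmab->addr exactly as with snd_dma_alloc_dir_pages() */
        return 0;
}
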
185 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
186 * @dmab: buffer allocation information
197 return -ENOENT; in snd_dma_buffer_mmap()
199 if (ops && ops->mmap) in snd_dma_buffer_mmap()
200 return ops->mmap(dmab, area); in snd_dma_buffer_mmap()
202 return -ENOENT; in snd_dma_buffer_mmap()
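
A hedged sketch of where snd_dma_buffer_mmap() is typically called from: a PCM .mmap callback handing the stream's buffer to user space. Most drivers can leave .mmap unset and let the PCM core do the equivalent.

#include <linux/mm.h>
#include <sound/pcm.h>

static int example_pcm_mmap(struct snd_pcm_substream *substream,
                            struct vm_area_struct *area)
{
        /* snd_pcm_get_dma_buf() returns the snd_dma_buffer backing this stream */
        return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
}
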
208 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
209 * @dmab: buffer allocation information
217 if (!dmab || !dmab->dev.need_sync) in snd_dma_buffer_sync()
220 if (ops && ops->sync) in snd_dma_buffer_sync()
221 ops->sync(dmab, mode); in snd_dma_buffer_sync()
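
A sketch of bracketing CPU accesses with the sync helper (buffer contents and sizes illustrative); the call is a no-op unless the allocator set dmab->dev.need_sync, as the non-coherent back ends below do.

#include <linux/string.h>
#include <sound/memalloc.h>

static void example_cpu_fills_buffer(struct snd_dma_buffer *dmab,
                                     const void *src, size_t bytes)
{
        memcpy(dmab->area, src, bytes);                   /* CPU writes ...   */
        snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE); /* ... device reads */
}

static void example_cpu_reads_buffer(struct snd_dma_buffer *dmab,
                                     void *dst, size_t bytes)
{
        snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);    /* device wrote ... */
        memcpy(dst, dmab->area, bytes);                   /* ... CPU reads    */
}
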
227 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
228 * @dmab: buffer allocation information
229 * @offset: offset in the ring buffer
237 if (ops && ops->get_addr) in snd_sgbuf_get_addr()
238 return ops->get_addr(dmab, offset); in snd_sgbuf_get_addr()
240 return dmab->addr + offset; in snd_sgbuf_get_addr()
245 * snd_sgbuf_get_page - return the physical page at the corresponding offset
246 * @dmab: buffer allocation information
247 * @offset: offset in the ring buffer
255 if (ops && ops->get_page) in snd_sgbuf_get_page()
256 return ops->get_page(dmab, offset); in snd_sgbuf_get_page()
258 return virt_to_page(dmab->area + offset); in snd_sgbuf_get_page()
263 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
264 on an SG buffer
265 * @dmab: buffer allocation information
266 * @ofs: offset in the ring buffer
267 * @size: the requested size
269 * Return: the chunk size
272 unsigned int ofs, unsigned int size) in snd_sgbuf_get_chunk_size() argument
276 if (ops && ops->get_chunk_size) in snd_sgbuf_get_chunk_size()
277 return ops->get_chunk_size(dmab, ofs, size); in snd_sgbuf_get_chunk_size()
279 return size; in snd_sgbuf_get_chunk_size()
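
A sketch of the intended use of the three helpers above: walking a (possibly scattered) buffer in physically contiguous chunks to build a hardware descriptor list. hw_add_entry() is a made-up stand-in for the driver's own descriptor writer.

#include <sound/memalloc.h>

static void hw_add_entry(dma_addr_t addr, unsigned int len);   /* hypothetical */

static void example_build_hw_sg_list(struct snd_dma_buffer *dmab, size_t bytes)
{
        unsigned int ofs = 0;

        while (ofs < bytes) {
                unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, bytes - ofs);

                hw_add_entry(snd_sgbuf_get_addr(dmab, ofs), chunk);
                ofs += chunk;
        }
}
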
286 static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr, in do_alloc_pages() argument
293 p = alloc_pages_exact(size, gfp); in do_alloc_pages()
299 if ((*addr + size - 1) & ~dev->coherent_dma_mask) { in do_alloc_pages()
311 set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT); in do_alloc_pages()
316 static void do_free_pages(void *p, size_t size, bool wc) in do_free_pages() argument
320 set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT); in do_free_pages()
322 free_pages_exact(p, size); in do_free_pages()
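
An illustrative restatement of the mask test in do_alloc_pages() above: the last byte of the buffer must be addressable under dev->coherent_dma_mask; with a 32-bit mask a buffer ending at or above 4 GiB fails, and the (elided) surrounding code presumably retries from a lower DMA zone.

#include <linux/types.h>

static bool example_fits_coherent_mask(dma_addr_t addr, size_t size, u64 mask)
{
        /* same arithmetic as the check above: any set bit outside the mask fails */
        return ((addr + size - 1) & ~mask) == 0;
}
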
326 static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_continuous_alloc() argument
328 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false); in snd_dma_continuous_alloc()
333 do_free_pages(dmab->area, dmab->bytes, false); in snd_dma_continuous_free()
339 return remap_pfn_range(area, area->vm_start, in snd_dma_continuous_mmap()
340 dmab->addr >> PAGE_SHIFT, in snd_dma_continuous_mmap()
341 area->vm_end - area->vm_start, in snd_dma_continuous_mmap()
342 area->vm_page_prot); in snd_dma_continuous_mmap()
354 static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_vmalloc_alloc() argument
356 return vmalloc(size); in snd_dma_vmalloc_alloc()
361 vfree(dmab->area); in snd_dma_vmalloc_free()
367 return remap_vmalloc_range(area, dmab->area, 0); in snd_dma_vmalloc_mmap()
371 page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
382 return vmalloc_to_page(dmab->area + offset); in snd_dma_vmalloc_get_page()
387 unsigned int ofs, unsigned int size) in snd_dma_vmalloc_get_chunk_size() argument
393 end = ofs + size - 1; /* the last byte address */ in snd_dma_vmalloc_get_chunk_size()
402 return start - ofs; in snd_dma_vmalloc_get_chunk_size()
405 return size; in snd_dma_vmalloc_get_chunk_size()
422 static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_iram_alloc() argument
424 struct device *dev = dmab->dev.dev; in snd_dma_iram_alloc()
428 if (dev->of_node) { in snd_dma_iram_alloc()
429 pool = of_gen_pool_get(dev->of_node, "iram", 0); in snd_dma_iram_alloc()
431 dmab->private_data = pool; in snd_dma_iram_alloc()
433 p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE); in snd_dma_iram_alloc()
438 /* Internal memory might have limited size and not enough space, in snd_dma_iram_alloc()
441 dmab->dev.type = SNDRV_DMA_TYPE_DEV; in snd_dma_iram_alloc()
442 return __snd_dma_alloc_pages(dmab, size); in snd_dma_iram_alloc()
447 struct gen_pool *pool = dmab->private_data; in snd_dma_iram_free()
449 if (pool && dmab->area) in snd_dma_iram_free()
450 gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); in snd_dma_iram_free()
456 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_iram_mmap()
457 return remap_pfn_range(area, area->vm_start, in snd_dma_iram_mmap()
458 dmab->addr >> PAGE_SHIFT, in snd_dma_iram_mmap()
459 area->vm_end - area->vm_start, in snd_dma_iram_mmap()
460 area->vm_page_prot); in snd_dma_iram_mmap()
473 static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_dev_alloc() argument
475 return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); in snd_dma_dev_alloc()
480 dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); in snd_dma_dev_free()
486 return dma_mmap_coherent(dmab->dev.dev, area, in snd_dma_dev_mmap()
487 dmab->area, dmab->addr, dmab->bytes); in snd_dma_dev_mmap()
497 * Write-combined pages
500 /* x86-specific allocations */
501 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_wc_alloc() argument
503 void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true); in snd_dma_wc_alloc()
507 dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL); in snd_dma_wc_alloc()
508 if (dmab->addr == DMA_MAPPING_ERROR) { in snd_dma_wc_alloc()
509 do_free_pages(dmab->area, size, true); in snd_dma_wc_alloc()
517 dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes, in snd_dma_wc_free()
519 do_free_pages(dmab->area, dmab->bytes, true); in snd_dma_wc_free()
525 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_wc_mmap()
526 return dma_mmap_coherent(dmab->dev.dev, area, in snd_dma_wc_mmap()
527 dmab->area, dmab->addr, dmab->bytes); in snd_dma_wc_mmap()
530 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_wc_alloc() argument
532 return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); in snd_dma_wc_alloc()
537 dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); in snd_dma_wc_free()
543 return dma_mmap_wc(dmab->dev.dev, area, in snd_dma_wc_mmap()
544 dmab->area, dmab->addr, dmab->bytes); in snd_dma_wc_mmap()
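
A sketch of requesting a write-combined buffer (size illustrative): the call is the same as before, only the type differs, and the mmap path above applies pgprot_writecombine() for the user mapping.

#include <linux/dma-mapping.h>
#include <sound/memalloc.h>

static int example_alloc_wc(struct device *dev, struct snd_dma_buffer *dmab)
{
        /* WC suits streaming CPU writes (e.g. playback data) toward the device */
        return snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_WC, dev,
                                       DMA_TO_DEVICE, 128 * 1024, dmab);
}
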
555 * Non-contiguous pages allocator
557 static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_noncontig_alloc() argument
562 sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir, in snd_dma_noncontig_alloc()
567 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, in snd_dma_noncontig_alloc()
568 sg_dma_address(sgt->sgl)); in snd_dma_noncontig_alloc()
569 p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt); in snd_dma_noncontig_alloc()
571 dmab->private_data = sgt; in snd_dma_noncontig_alloc()
573 dmab->addr = snd_sgbuf_get_addr(dmab, 0); in snd_dma_noncontig_alloc()
575 dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir); in snd_dma_noncontig_alloc()
582 dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area); in snd_dma_noncontig_free()
583 dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data, in snd_dma_noncontig_free()
584 dmab->dev.dir); in snd_dma_noncontig_free()
590 return dma_mmap_noncontiguous(dmab->dev.dev, area, in snd_dma_noncontig_mmap()
591 dmab->bytes, dmab->private_data); in snd_dma_noncontig_mmap()
598 if (dmab->dev.dir == DMA_TO_DEVICE) in snd_dma_noncontig_sync()
600 invalidate_kernel_vmap_range(dmab->area, dmab->bytes); in snd_dma_noncontig_sync()
601 dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data, in snd_dma_noncontig_sync()
602 dmab->dev.dir); in snd_dma_noncontig_sync()
604 if (dmab->dev.dir == DMA_FROM_DEVICE) in snd_dma_noncontig_sync()
606 flush_kernel_vmap_range(dmab->area, dmab->bytes); in snd_dma_noncontig_sync()
607 dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data, in snd_dma_noncontig_sync()
608 dmab->dev.dir); in snd_dma_noncontig_sync()
616 struct sg_table *sgt = dmab->private_data; in snd_dma_noncontig_iter_set()
618 __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents, in snd_dma_noncontig_iter_set()
644 unsigned int ofs, unsigned int size) in snd_dma_noncontig_get_chunk_size() argument
651 end = ofs + size - 1; /* the last byte address */ in snd_dma_noncontig_get_chunk_size()
664 return start - ofs; in snd_dma_noncontig_get_chunk_size()
667 return size; in snd_dma_noncontig_get_chunk_size()
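
A hedged sketch built on the detail shown above that snd_dma_noncontig_alloc() stores its sg_table in dmab->private_data: a driver that needs the raw DMA segments can walk that table, although the snd_sgbuf_* helpers are usually the better interface.

#include <linux/device.h>
#include <linux/scatterlist.h>
#include <sound/memalloc.h>

static void example_dump_noncontig_segments(struct snd_dma_buffer *dmab)
{
        struct sg_table *sgt = dmab->private_data;
        struct scatterlist *sg;
        int i;

        for_each_sgtable_dma_sg(sgt, sg, i)
                dev_dbg(dmab->dev.dev, "segment %d: %pad + %u bytes\n",
                        i, &sg_dma_address(sg), sg_dma_len(sg));
}
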
681 /* Fallback SG-buffer allocations for x86 */
683 struct sg_table sgt; /* used by get_addr - must be the first item */
692 bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG; in __snd_dma_sg_fallback_free()
693 size_t i, size; in __snd_dma_sg_fallback_free() local
695 if (sgbuf->pages && sgbuf->npages) { in __snd_dma_sg_fallback_free()
697 while (i < sgbuf->count) { in __snd_dma_sg_fallback_free()
698 size = sgbuf->npages[i]; in __snd_dma_sg_fallback_free()
699 if (!size) in __snd_dma_sg_fallback_free()
701 do_free_pages(page_address(sgbuf->pages[i]), in __snd_dma_sg_fallback_free()
702 size << PAGE_SHIFT, wc); in __snd_dma_sg_fallback_free()
703 i += size; in __snd_dma_sg_fallback_free()
706 kvfree(sgbuf->pages); in __snd_dma_sg_fallback_free()
707 kvfree(sgbuf->npages); in __snd_dma_sg_fallback_free()
711 /* fallback manual S/G buffer allocations */
712 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_sg_fallback_alloc() argument
714 bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG; in snd_dma_sg_fallback_alloc()
725 size = PAGE_ALIGN(size); in snd_dma_sg_fallback_alloc()
726 sgbuf->count = size >> PAGE_SHIFT; in snd_dma_sg_fallback_alloc()
727 sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL); in snd_dma_sg_fallback_alloc()
728 sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL); in snd_dma_sg_fallback_alloc()
729 if (!sgbuf->pages || !sgbuf->npages) in snd_dma_sg_fallback_alloc()
732 pagep = sgbuf->pages; in snd_dma_sg_fallback_alloc()
733 chunk = size; in snd_dma_sg_fallback_alloc()
735 while (size > 0) { in snd_dma_sg_fallback_alloc()
736 chunk = min(size, chunk); in snd_dma_sg_fallback_alloc()
737 p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc); in snd_dma_sg_fallback_alloc()
746 size -= chunk; in snd_dma_sg_fallback_alloc()
749 sgbuf->npages[idx] = npages; in snd_dma_sg_fallback_alloc()
752 while (npages--) in snd_dma_sg_fallback_alloc()
756 if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count, in snd_dma_sg_fallback_alloc()
757 0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL)) in snd_dma_sg_fallback_alloc()
760 if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0)) in snd_dma_sg_fallback_alloc()
763 p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL); in snd_dma_sg_fallback_alloc()
767 dmab->private_data = sgbuf; in snd_dma_sg_fallback_alloc()
769 dmab->addr = snd_sgbuf_get_addr(dmab, 0); in snd_dma_sg_fallback_alloc()
773 dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0); in snd_dma_sg_fallback_alloc()
775 sg_free_table(&sgbuf->sgt); in snd_dma_sg_fallback_alloc()
783 struct snd_dma_sg_fallback *sgbuf = dmab->private_data; in snd_dma_sg_fallback_free()
785 vunmap(dmab->area); in snd_dma_sg_fallback_free()
786 dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0); in snd_dma_sg_fallback_free()
787 sg_free_table(&sgbuf->sgt); in snd_dma_sg_fallback_free()
788 __snd_dma_sg_fallback_free(dmab, dmab->private_data); in snd_dma_sg_fallback_free()
794 struct snd_dma_sg_fallback *sgbuf = dmab->private_data; in snd_dma_sg_fallback_mmap()
796 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) in snd_dma_sg_fallback_mmap()
797 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); in snd_dma_sg_fallback_mmap()
798 return vm_map_pages(area, sgbuf->pages, sgbuf->count); in snd_dma_sg_fallback_mmap()
801 static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_sg_alloc() argument
803 int type = dmab->dev.type; in snd_dma_sg_alloc()
808 dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC; in snd_dma_sg_alloc()
810 dmab->dev.type = SNDRV_DMA_TYPE_DEV; in snd_dma_sg_alloc()
811 p = __snd_dma_alloc_pages(dmab, size); in snd_dma_sg_alloc()
815 dmab->dev.type = type; /* restore the type */ in snd_dma_sg_alloc()
816 return snd_dma_sg_fallback_alloc(dmab, size); in snd_dma_sg_alloc()
832 * Non-coherent pages allocator
834 static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size) in snd_dma_noncoherent_alloc() argument
838 p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr, in snd_dma_noncoherent_alloc()
839 dmab->dev.dir, DEFAULT_GFP); in snd_dma_noncoherent_alloc()
841 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr); in snd_dma_noncoherent_alloc()
847 dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area, in snd_dma_noncoherent_free()
848 dmab->addr, dmab->dev.dir); in snd_dma_noncoherent_free()
854 area->vm_page_prot = vm_get_page_prot(area->vm_flags); in snd_dma_noncoherent_mmap()
855 return dma_mmap_pages(dmab->dev.dev, area, in snd_dma_noncoherent_mmap()
856 area->vm_end - area->vm_start, in snd_dma_noncoherent_mmap()
857 virt_to_page(dmab->area)); in snd_dma_noncoherent_mmap()
864 if (dmab->dev.dir != DMA_TO_DEVICE) in snd_dma_noncoherent_sync()
865 dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr, in snd_dma_noncoherent_sync()
866 dmab->bytes, dmab->dev.dir); in snd_dma_noncoherent_sync()
868 if (dmab->dev.dir != DMA_FROM_DEVICE) in snd_dma_noncoherent_sync()
869 dma_sync_single_for_device(dmab->dev.dev, dmab->addr, in snd_dma_noncoherent_sync()
870 dmab->bytes, dmab->dev.dir); in snd_dma_noncoherent_sync()
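
A sketch tying the non-coherent type to the direction-aware sync above (sizes illustrative): a capture-style DMA_FROM_DEVICE buffer only needs the CPU-side sync before the CPU reads what the device wrote.

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <sound/memalloc.h>

static int example_capture_noncoherent(struct device *dev,
                                       struct snd_dma_buffer *dmab,
                                       void *dst, size_t bytes)
{
        int err;

        err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCOHERENT, dev,
                                      DMA_FROM_DEVICE, bytes, dmab);
        if (err < 0)
                return err;

        /* ... the device DMAs captured data into dmab->area ... */

        snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);  /* invalidate before reading */
        memcpy(dst, dmab->area, bytes);
        return 0;
}
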
908 if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN || in snd_dma_get_ops()
909 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops))) in snd_dma_get_ops()
911 return snd_dma_ops[dmab->dev.type]; in snd_dma_get_ops()