Lines Matching +full:page +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/arch/arm/mm/dma-mapping.c
5 * Copyright (C) 2000-2004 Russell King
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
28 #include <asm/page.h>
33 #include <asm/dma-iommu.h>
36 #include <asm/xen/xen-ops.h>
43 size_t size; member
53 size_t size; member
55 struct page *page; member
64 struct page **ret_page);
84 if (buf->virt == virt) { in arm_dma_buffer_find()
85 list_del(&buf->list); in arm_dma_buffer_find()
107 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) in __dma_clear_buffer() argument
111 * lurking in the kernel direct-mapped region is invalidated. in __dma_clear_buffer()
113 if (PageHighMem(page)) { in __dma_clear_buffer()
114 phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); in __dma_clear_buffer()
115 phys_addr_t end = base + size; in __dma_clear_buffer()
116 while (size > 0) { in __dma_clear_buffer()
117 void *ptr = kmap_atomic(page); in __dma_clear_buffer()
122 page++; in __dma_clear_buffer()
123 size -= PAGE_SIZE; in __dma_clear_buffer()
128 void *ptr = page_address(page); in __dma_clear_buffer()
129 memset(ptr, 0, size); in __dma_clear_buffer()
131 dmac_flush_range(ptr, ptr + size); in __dma_clear_buffer()
132 outer_flush_range(__pa(ptr), __pa(ptr) + size); in __dma_clear_buffer()
138 * Allocate a DMA buffer for 'dev' of size 'size' using the
139 * specified gfp mask. Note that 'size' must be page aligned.
141 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, in __dma_alloc_buffer() argument
144 unsigned long order = get_order(size); in __dma_alloc_buffer()
145 struct page *page, *p, *e; in __dma_alloc_buffer() local
147 page = alloc_pages(gfp, order); in __dma_alloc_buffer()
148 if (!page) in __dma_alloc_buffer()
152 * Now split the huge page and free the excess pages in __dma_alloc_buffer()
154 split_page(page, order); in __dma_alloc_buffer()
155 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer()
158 __dma_clear_buffer(page, size, coherent_flag); in __dma_alloc_buffer()
160 return page; in __dma_alloc_buffer()
164 * Free a DMA buffer. 'size' must be page aligned.
166 static void __dma_free_buffer(struct page *page, size_t size) in __dma_free_buffer() argument
168 struct page *e = page + (size >> PAGE_SHIFT); in __dma_free_buffer()
170 while (page < e) { in __dma_free_buffer()
171 __free_page(page); in __dma_free_buffer()
172 page++; in __dma_free_buffer()
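
The __dma_alloc_buffer()/__dma_free_buffer() helpers above back the coherent allocation path; drivers normally reach them through the generic DMA API rather than calling them directly. A minimal sketch of that driver-side usage, assuming a hypothetical my_dev structure and a 64 KiB buffer (both invented for illustration):

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

struct my_dev {
        struct device *dev;     /* underlying struct device */
        void *ring;             /* CPU virtual address of the buffer */
        dma_addr_t ring_dma;    /* bus address handed to the hardware */
};

static int my_dev_alloc_ring(struct my_dev *md)
{
        /* The size is page-aligned internally and the memory comes back zeroed. */
        md->ring = dma_alloc_coherent(md->dev, SZ_64K, &md->ring_dma, GFP_KERNEL);
        return md->ring ? 0 : -ENOMEM;
}

static void my_dev_free_ring(struct my_dev *md)
{
        dma_free_coherent(md->dev, SZ_64K, md->ring, md->ring_dma);
}
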
176 static void *__alloc_from_contiguous(struct device *dev, size_t size,
177 pgprot_t prot, struct page **ret_page,
181 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
182 pgprot_t prot, struct page **ret_page,
204 struct page *page; in atomic_pool_init() local
207 atomic_pool = gen_pool_create(PAGE_SHIFT, -1); in atomic_pool_init()
211 * The atomic pool is only used for non-coherent allocations in atomic_pool_init()
216 &page, atomic_pool_init, true, NORMAL, in atomic_pool_init()
220 &page, atomic_pool_init, true); in atomic_pool_init()
225 page_to_phys(page), in atomic_pool_init()
226 atomic_pool_size, -1); in atomic_pool_init()
244 return -ENOMEM; in atomic_pool_init()
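
atomic_pool_init() above builds the atomic pool on lib/genalloc (gen_pool_create()/gen_pool_alloc()/gen_pool_free()), so non-blocking callers can be served without going back to the page allocator. A minimal sketch of that genalloc pattern, with an invented demo_pool backed by four kernel pages (all demo_* names and sizes are illustrative):

#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static struct gen_pool *demo_pool;

static int __init demo_pool_init(void)
{
        unsigned long backing = __get_free_pages(GFP_KERNEL, 2);

        if (!backing)
                return -ENOMEM;

        /* Page-granular allocations, no NUMA node preference. */
        demo_pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!demo_pool)
                goto err_free_pages;

        /* Donate the backing memory to the pool. */
        if (gen_pool_add(demo_pool, backing, 4 * PAGE_SIZE, -1))
                goto err_destroy_pool;

        return 0;

err_destroy_pool:
        gen_pool_destroy(demo_pool);
err_free_pages:
        free_pages(backing, 2);
        return -ENOMEM;
}

/* gen_pool_alloc()/gen_pool_free() may then be used even from atomic context. */
static void *demo_pool_get(size_t size)
{
        return (void *)gen_pool_alloc(demo_pool, size);
}

static void demo_pool_put(void *addr, size_t size)
{
        gen_pool_free(demo_pool, (unsigned long)addr, size);
}
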
254 unsigned long size; member
262 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) in dma_contiguous_early_fixup() argument
265 dma_mmu_remap[dma_mmu_remap_num].size = size; in dma_contiguous_early_fixup()
275 phys_addr_t end = start + dma_mmu_remap[i].size; in dma_contiguous_remap()
286 map.length = end - start; in dma_contiguous_remap()
290 * Clear previous low-memory mapping to ensure that the in dma_contiguous_remap()
312 struct page *page = virt_to_page((void *)addr); in __dma_update_pte() local
315 set_pte_ext(pte, mk_pte(page, prot), 0); in __dma_update_pte()
319 static void __dma_remap(struct page *page, size_t size, pgprot_t prot) in __dma_remap() argument
321 unsigned long start = (unsigned long) page_address(page); in __dma_remap()
322 unsigned end = start + size; in __dma_remap()
324 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); in __dma_remap()
328 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_remap_buffer() argument
329 pgprot_t prot, struct page **ret_page, in __alloc_remap_buffer()
332 struct page *page; in __alloc_remap_buffer() local
336 * non-coherent in __alloc_remap_buffer()
338 page = __dma_alloc_buffer(dev, size, gfp, NORMAL); in __alloc_remap_buffer()
339 if (!page) in __alloc_remap_buffer()
344 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_remap_buffer()
346 __dma_free_buffer(page, size); in __alloc_remap_buffer()
351 *ret_page = page; in __alloc_remap_buffer()
355 static void *__alloc_from_pool(size_t size, struct page **ret_page) in __alloc_from_pool() argument
365 val = gen_pool_alloc(atomic_pool, size); in __alloc_from_pool()
376 static bool __in_atomic_pool(void *start, size_t size) in __in_atomic_pool() argument
378 return gen_pool_has_addr(atomic_pool, (unsigned long)start, size); in __in_atomic_pool()
381 static int __free_from_pool(void *start, size_t size) in __free_from_pool() argument
383 if (!__in_atomic_pool(start, size)) in __free_from_pool()
386 gen_pool_free(atomic_pool, (unsigned long)start, size); in __free_from_pool()
391 static void *__alloc_from_contiguous(struct device *dev, size_t size, in __alloc_from_contiguous() argument
392 pgprot_t prot, struct page **ret_page, in __alloc_from_contiguous()
396 unsigned long order = get_order(size); in __alloc_from_contiguous()
397 size_t count = size >> PAGE_SHIFT; in __alloc_from_contiguous()
398 struct page *page; in __alloc_from_contiguous() local
401 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN); in __alloc_from_contiguous()
402 if (!page) in __alloc_from_contiguous()
405 __dma_clear_buffer(page, size, coherent_flag); in __alloc_from_contiguous()
410 if (PageHighMem(page)) { in __alloc_from_contiguous()
411 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_from_contiguous()
413 dma_release_from_contiguous(dev, page, count); in __alloc_from_contiguous()
417 __dma_remap(page, size, prot); in __alloc_from_contiguous()
418 ptr = page_address(page); in __alloc_from_contiguous()
422 *ret_page = page; in __alloc_from_contiguous()
426 static void __free_from_contiguous(struct device *dev, struct page *page, in __free_from_contiguous() argument
427 void *cpu_addr, size_t size, bool want_vaddr) in __free_from_contiguous() argument
430 if (PageHighMem(page)) in __free_from_contiguous()
431 dma_common_free_remap(cpu_addr, size); in __free_from_contiguous()
433 __dma_remap(page, size, PAGE_KERNEL); in __free_from_contiguous()
435 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); in __free_from_contiguous()
446 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_simple_buffer() argument
447 struct page **ret_page) in __alloc_simple_buffer()
449 struct page *page; in __alloc_simple_buffer() local
451 page = __dma_alloc_buffer(dev, size, gfp, COHERENT); in __alloc_simple_buffer()
452 if (!page) in __alloc_simple_buffer()
455 *ret_page = page; in __alloc_simple_buffer()
456 return page_address(page); in __alloc_simple_buffer()
460 struct page **ret_page) in simple_allocator_alloc()
462 return __alloc_simple_buffer(args->dev, args->size, args->gfp, in simple_allocator_alloc()
468 __dma_free_buffer(args->page, args->size); in simple_allocator_free()
477 struct page **ret_page) in cma_allocator_alloc()
479 return __alloc_from_contiguous(args->dev, args->size, args->prot, in cma_allocator_alloc()
480 ret_page, args->caller, in cma_allocator_alloc()
481 args->want_vaddr, args->coherent_flag, in cma_allocator_alloc()
482 args->gfp); in cma_allocator_alloc()
487 __free_from_contiguous(args->dev, args->page, args->cpu_addr, in cma_allocator_free()
488 args->size, args->want_vaddr); in cma_allocator_free()
497 struct page **ret_page) in pool_allocator_alloc()
499 return __alloc_from_pool(args->size, ret_page); in pool_allocator_alloc()
504 __free_from_pool(args->cpu_addr, args->size); in pool_allocator_free()
513 struct page **ret_page) in remap_allocator_alloc()
515 return __alloc_remap_buffer(args->dev, args->size, args->gfp, in remap_allocator_alloc()
516 args->prot, ret_page, args->caller, in remap_allocator_alloc()
517 args->want_vaddr); in remap_allocator_alloc()
522 if (args->want_vaddr) in remap_allocator_free()
523 dma_common_free_remap(args->cpu_addr, args->size); in remap_allocator_free()
525 __dma_free_buffer(args->page, args->size); in remap_allocator_free()
533 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, in __dma_alloc() argument
537 u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); in __dma_alloc()
538 struct page *page = NULL; in __dma_alloc() local
544 .size = PAGE_ALIGN(size), in __dma_alloc()
554 if (limit && size >= limit) { in __dma_alloc()
556 size, mask); in __dma_alloc()
576 buf->allocator = &cma_allocator; in __dma_alloc()
578 buf->allocator = &simple_allocator; in __dma_alloc()
580 buf->allocator = &remap_allocator; in __dma_alloc()
582 buf->allocator = &pool_allocator; in __dma_alloc()
584 addr = buf->allocator->alloc(&args, &page); in __dma_alloc()
586 if (page) { in __dma_alloc()
589 *handle = phys_to_dma(dev, page_to_phys(page)); in __dma_alloc()
590 buf->virt = args.want_vaddr ? addr : page; in __dma_alloc()
593 list_add(&buf->list, &arm_dma_bufs); in __dma_alloc()
599 return args.want_vaddr ? addr : page; in __dma_alloc()
605 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, in __arm_dma_free() argument
609 struct page *page = phys_to_page(dma_to_phys(dev, handle)); in __arm_dma_free() local
613 .size = PAGE_ALIGN(size), in __arm_dma_free()
615 .page = page, in __arm_dma_free()
623 buf->allocator->free(&args); in __arm_dma_free()
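
__dma_alloc() above picks one of four back ends (CMA, simple, remap, atomic pool) through a small alloc/free vtable and records the choice in the buffer so __arm_dma_free() can undo it with the matching free hook. A sketch of that strategy-struct idiom, with invented demo_* names and only two back ends wired up:

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct demo_args {
        struct device *dev;
        size_t size;
        gfp_t gfp;
};

struct demo_allocator {
        void *(*alloc)(struct demo_args *args, struct page **ret_page);
        void (*free)(struct demo_args *args, void *cpu_addr, struct page *page);
};

struct demo_buffer {
        void *virt;
        struct page *page;
        const struct demo_allocator *allocator; /* remembered for the free path */
};

static void *demo_alloc(struct demo_buffer *buf, struct demo_args *args,
                        const struct demo_allocator *blocking,
                        const struct demo_allocator *atomic_pool)
{
        struct page *page = NULL;

        args->size = PAGE_ALIGN(args->size);
        /* Non-blocking callers must not remap or reclaim, so use the pool. */
        buf->allocator = gfpflags_allow_blocking(args->gfp) ? blocking : atomic_pool;
        buf->virt = buf->allocator->alloc(args, &page);
        buf->page = page;
        return buf->virt;
}

static void demo_free(struct demo_buffer *buf, struct demo_args *args)
{
        buf->allocator->free(args, buf->virt, buf->page);
}
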
627 static void dma_cache_maint_page(struct page *page, unsigned long offset, in dma_cache_maint_page() argument
628 size_t size, enum dma_data_direction dir, in dma_cache_maint_page() argument
632 size_t left = size; in dma_cache_maint_page()
634 pfn = page_to_pfn(page) + offset / PAGE_SIZE; in dma_cache_maint_page()
647 page = pfn_to_page(pfn); in dma_cache_maint_page()
649 if (PageHighMem(page)) { in dma_cache_maint_page()
651 len = PAGE_SIZE - offset; in dma_cache_maint_page()
654 vaddr = kmap_atomic(page); in dma_cache_maint_page()
658 vaddr = kmap_high_get(page); in dma_cache_maint_page()
661 kunmap_high(page); in dma_cache_maint_page()
665 vaddr = page_address(page) + offset; in dma_cache_maint_page()
670 left -= len; in dma_cache_maint_page()
677 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
679 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, in __dma_page_cpu_to_dev() argument
680 size_t size, enum dma_data_direction dir) in __dma_page_cpu_to_dev() argument
684 dma_cache_maint_page(page, off, size, dir, dmac_map_area); in __dma_page_cpu_to_dev()
686 paddr = page_to_phys(page) + off; in __dma_page_cpu_to_dev()
688 outer_inv_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
690 outer_clean_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
692 /* FIXME: non-speculating: flush on bidirectional mappings? */ in __dma_page_cpu_to_dev()
695 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, in __dma_page_dev_to_cpu() argument
696 size_t size, enum dma_data_direction dir) in __dma_page_dev_to_cpu() argument
698 phys_addr_t paddr = page_to_phys(page) + off; in __dma_page_dev_to_cpu()
700 /* FIXME: non-speculating: not required */ in __dma_page_dev_to_cpu()
703 outer_inv_range(paddr, paddr + size); in __dma_page_dev_to_cpu()
705 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); in __dma_page_dev_to_cpu()
709 * Mark the D-cache clean for these pages to avoid extra flushing. in __dma_page_dev_to_cpu()
711 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { in __dma_page_dev_to_cpu()
716 size_t sz = folio_size(folio) - offset; in __dma_page_dev_to_cpu()
718 if (size < sz) in __dma_page_dev_to_cpu()
721 set_bit(PG_dcache_clean, &folio->flags); in __dma_page_dev_to_cpu()
723 size -= sz; in __dma_page_dev_to_cpu()
724 if (!size) in __dma_page_dev_to_cpu()
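
__dma_page_cpu_to_dev()/__dma_page_dev_to_cpu() above supply the cache maintenance behind the streaming DMA API; drivers trigger them through dma_map_single() and the dma_sync_*() calls. A hedged sketch of one receive cycle, where "dev", "buf" and "len" are purely illustrative and error handling is trimmed:

#include <linux/dma-mapping.h>

static void demo_rx_once(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, dma))
                return;

        /* ... point the hardware at "dma" and wait for the transfer ... */

        /* Hand the buffer back to the CPU; stale cache lines are invalidated. */
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
        /* ... the CPU may now read the received data ... */

        /* Return ownership to the device before reusing the buffer. */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

        dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}
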
757 size_t size) in __alloc_iova() argument
759 unsigned int order = get_order(size); in __alloc_iova()
762 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
770 count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __alloc_iova()
771 align = (1 << order) - 1; in __alloc_iova()
773 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
774 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
775 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
776 mapping->bits, 0, count, align); in __alloc_iova()
778 if (start > mapping->bits) in __alloc_iova()
781 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
788 * address range of size bytes. in __alloc_iova()
790 if (i == mapping->nr_bitmaps) { in __alloc_iova()
792 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
796 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
797 mapping->bits, 0, count, align); in __alloc_iova()
799 if (start > mapping->bits) { in __alloc_iova()
800 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
804 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
806 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
808 iova = mapping->base + (mapping_size * i); in __alloc_iova()
815 dma_addr_t addr, size_t size) in __free_iova() argument
818 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __free_iova()
823 if (!size) in __free_iova()
826 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; in __free_iova()
827 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); in __free_iova()
829 bitmap_base = mapping->base + mapping_size * bitmap_index; in __free_iova()
831 start = (addr - bitmap_base) >> PAGE_SHIFT; in __free_iova()
833 if (addr + size > bitmap_base + mapping_size) { in __free_iova()
842 count = size >> PAGE_SHIFT; in __free_iova()
844 spin_lock_irqsave(&mapping->lock, flags); in __free_iova()
845 bitmap_clear(mapping->bitmaps[bitmap_index], start, count); in __free_iova()
846 spin_unlock_irqrestore(&mapping->lock, flags); in __free_iova()
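
__alloc_iova()/__free_iova() above carve IOVA ranges out of per-mapping bitmaps with bitmap_find_next_zero_area()/bitmap_set()/bitmap_clear() under a spinlock. A minimal single-bitmap sketch of the same idiom (DEMO_SLOTS and the demo_* names are invented; the real code also grows extra bitmaps on demand):

#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

#define DEMO_SLOTS 256                  /* illustrative region size, in pages */

static DECLARE_BITMAP(demo_map, DEMO_SLOTS);
static DEFINE_SPINLOCK(demo_lock);

/* Reserve "count" contiguous slots; returns the first slot or -ENOSPC. */
static int demo_region_get(unsigned int count, unsigned long align_mask)
{
        unsigned long start, flags;

        spin_lock_irqsave(&demo_lock, flags);
        start = bitmap_find_next_zero_area(demo_map, DEMO_SLOTS, 0,
                                           count, align_mask);
        if (start >= DEMO_SLOTS) {
                spin_unlock_irqrestore(&demo_lock, flags);
                return -ENOSPC;
        }
        bitmap_set(demo_map, start, count);
        spin_unlock_irqrestore(&demo_lock, flags);

        return start;
}

static void demo_region_put(unsigned int start, unsigned int count)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        bitmap_clear(demo_map, start, count);
        spin_unlock_irqrestore(&demo_lock, flags);
}
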
852 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, in __iommu_alloc_buffer() argument
856 struct page **pages; in __iommu_alloc_buffer()
857 int count = size >> PAGE_SHIFT; in __iommu_alloc_buffer()
858 int array_size = count * sizeof(struct page *); in __iommu_alloc_buffer()
868 unsigned long order = get_order(size); in __iommu_alloc_buffer()
869 struct page *page; in __iommu_alloc_buffer() local
871 page = dma_alloc_from_contiguous(dev, count, order, in __iommu_alloc_buffer()
873 if (!page) in __iommu_alloc_buffer()
876 __dma_clear_buffer(page, size, coherent_flag); in __iommu_alloc_buffer()
879 pages[i] = page + i; in __iommu_alloc_buffer()
886 order_idx = ARRAY_SIZE(iommu_order_array) - 1; in __iommu_alloc_buffer()
905 /* See if it's easy to allocate a high-order chunk */ in __iommu_alloc_buffer()
922 while (--j) in __iommu_alloc_buffer()
928 count -= 1 << order; in __iommu_alloc_buffer()
933 while (i--) in __iommu_alloc_buffer()
940 static int __iommu_free_buffer(struct device *dev, struct page **pages, in __iommu_free_buffer()
941 size_t size, unsigned long attrs) in __iommu_free_buffer() argument
943 int count = size >> PAGE_SHIFT; in __iommu_free_buffer()
962 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, in __iommu_create_mapping() argument
966 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __iommu_create_mapping()
970 dma_addr = __alloc_iova(mapping, size); in __iommu_create_mapping()
986 len = (j - i) << PAGE_SHIFT; in __iommu_create_mapping()
987 ret = iommu_map(mapping->domain, iova, phys, len, in __iommu_create_mapping()
997 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); in __iommu_create_mapping()
998 __free_iova(mapping, dma_addr, size); in __iommu_create_mapping()
1002 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) in __iommu_remove_mapping() argument
1007 * add optional in-page offset from iova to size and align in __iommu_remove_mapping()
1008 * result to page size in __iommu_remove_mapping()
1010 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); in __iommu_remove_mapping()
1013 iommu_unmap(mapping->domain, iova, size); in __iommu_remove_mapping()
1014 __free_iova(mapping, iova, size); in __iommu_remove_mapping()
1018 static struct page **__atomic_get_pages(void *addr) in __atomic_get_pages()
1020 struct page *page; in __atomic_get_pages() local
1024 page = phys_to_page(phys); in __atomic_get_pages()
1026 return (struct page **)page; in __atomic_get_pages()
1029 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) in __iommu_get_pages()
1040 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, in __iommu_alloc_simple() argument
1044 struct page *page; in __iommu_alloc_simple() local
1048 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __iommu_alloc_simple()
1050 addr = __alloc_from_pool(size, &page); in __iommu_alloc_simple()
1054 *handle = __iommu_create_mapping(dev, &page, size, attrs); in __iommu_alloc_simple()
1061 __free_from_pool(addr, size); in __iommu_alloc_simple()
1066 dma_addr_t handle, size_t size, int coherent_flag) in __iommu_free_atomic() argument
1068 __iommu_remove_mapping(dev, handle, size); in __iommu_free_atomic()
1070 __dma_free_buffer(virt_to_page(cpu_addr), size); in __iommu_free_atomic()
1072 __free_from_pool(cpu_addr, size); in __iommu_free_atomic()
1075 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, in arm_iommu_alloc_attrs() argument
1079 struct page **pages; in arm_iommu_alloc_attrs()
1081 int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL; in arm_iommu_alloc_attrs()
1084 size = PAGE_ALIGN(size); in arm_iommu_alloc_attrs()
1087 return __iommu_alloc_simple(dev, size, gfp, handle, in arm_iommu_alloc_attrs()
1090 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag); in arm_iommu_alloc_attrs()
1094 *handle = __iommu_create_mapping(dev, pages, size, attrs); in arm_iommu_alloc_attrs()
1101 addr = dma_common_pages_remap(pages, size, prot, in arm_iommu_alloc_attrs()
1109 __iommu_remove_mapping(dev, *handle, size); in arm_iommu_alloc_attrs()
1111 __iommu_free_buffer(dev, pages, size, attrs); in arm_iommu_alloc_attrs()
1116 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_iommu_mmap_attrs() argument
1119 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_mmap_attrs()
1120 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in arm_iommu_mmap_attrs()
1124 return -ENXIO; in arm_iommu_mmap_attrs()
1126 if (vma->vm_pgoff >= nr_pages) in arm_iommu_mmap_attrs()
1127 return -ENXIO; in arm_iommu_mmap_attrs()
1129 if (!dev->dma_coherent) in arm_iommu_mmap_attrs()
1130 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); in arm_iommu_mmap_attrs()
1140 * free a page as defined by the above mapping.
1143 static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, in arm_iommu_free_attrs() argument
1146 int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL; in arm_iommu_free_attrs()
1147 struct page **pages; in arm_iommu_free_attrs()
1148 size = PAGE_ALIGN(size); in arm_iommu_free_attrs()
1150 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { in arm_iommu_free_attrs()
1151 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); in arm_iommu_free_attrs()
1162 dma_common_free_remap(cpu_addr, size); in arm_iommu_free_attrs()
1164 __iommu_remove_mapping(dev, handle, size); in arm_iommu_free_attrs()
1165 __iommu_free_buffer(dev, pages, size, attrs); in arm_iommu_free_attrs()
1170 size_t size, unsigned long attrs) in arm_iommu_get_sgtable() argument
1172 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in arm_iommu_get_sgtable()
1173 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_get_sgtable()
1176 return -ENXIO; in arm_iommu_get_sgtable()
1178 return sg_alloc_table_from_pages(sgt, pages, count, 0, size, in arm_iommu_get_sgtable()
1183 * Map a part of the scatter-gather list into contiguous io address space
1186 size_t size, dma_addr_t *handle, in __map_sg_chunk() argument
1196 size = PAGE_ALIGN(size); in __map_sg_chunk()
1199 iova_base = iova = __alloc_iova(mapping, size); in __map_sg_chunk()
1201 return -ENOMEM; in __map_sg_chunk()
1203 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { in __map_sg_chunk()
1205 unsigned int len = PAGE_ALIGN(s->offset + s->length); in __map_sg_chunk()
1207 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) in __map_sg_chunk()
1208 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); in __map_sg_chunk()
1212 ret = iommu_map(mapping->domain, iova, phys, len, prot, in __map_sg_chunk()
1223 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); in __map_sg_chunk()
1224 __free_iova(mapping, iova_base, size); in __map_sg_chunk()
1229 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1245 unsigned int offset = s->offset; in arm_iommu_map_sg()
1246 unsigned int size = s->offset + s->length; in arm_iommu_map_sg() local
1252 s->dma_length = 0; in arm_iommu_map_sg()
1254 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { in arm_iommu_map_sg()
1255 ret = __map_sg_chunk(dev, start, size, in arm_iommu_map_sg()
1256 &dma->dma_address, dir, attrs); in arm_iommu_map_sg()
1260 dma->dma_address += offset; in arm_iommu_map_sg()
1261 dma->dma_length = size - offset; in arm_iommu_map_sg()
1263 size = offset = s->offset; in arm_iommu_map_sg()
1268 size += s->length; in arm_iommu_map_sg()
1270 ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs); in arm_iommu_map_sg()
1274 dma->dma_address += offset; in arm_iommu_map_sg()
1275 dma->dma_length = size - offset; in arm_iommu_map_sg()
1282 if (ret == -ENOMEM) in arm_iommu_map_sg()
1284 return -EINVAL; in arm_iommu_map_sg()
1288 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1309 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) in arm_iommu_unmap_sg()
1310 __dma_page_dev_to_cpu(sg_page(s), s->offset, in arm_iommu_unmap_sg()
1311 s->length, dir); in arm_iommu_unmap_sg()
1329 if (dev->dma_coherent) in arm_iommu_sync_sg_for_cpu()
1333 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); in arm_iommu_sync_sg_for_cpu()
1351 if (dev->dma_coherent) in arm_iommu_sync_sg_for_device()
1355 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); in arm_iommu_sync_sg_for_device()
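
arm_iommu_map_sg() and the sync_sg helpers above sit behind dma_map_sg(): a driver builds a scatterlist, maps it, and walks the (possibly merged) entries that come back. A sketch with invented names and a two-buffer table:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int demo_map_two_buffers(struct device *dev, void *buf_a, size_t len_a,
                                void *buf_b, size_t len_b)
{
        struct scatterlist sgl[2], *sg;
        int nents, i;

        sg_init_table(sgl, 2);
        sg_set_buf(&sgl[0], buf_a, len_a);
        sg_set_buf(&sgl[1], buf_b, len_b);

        nents = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
        if (!nents)
                return -ENOMEM;

        /* With an IOMMU, adjacent entries may have been merged: walk what came back. */
        for_each_sg(sgl, sg, nents, i)
                pr_debug("seg %d: dma %pad len %u\n",
                         i, &sg_dma_address(sg), sg_dma_len(sg));

        /* ... program the device and wait for the transfer ... */

        /* Unmap with the original entry count, not the returned one. */
        dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
        return 0;
}
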
1361 * @page: page that buffer resides in
1362 * @offset: offset into page for start of buffer
1363 * @size: size of buffer to map
1368 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, in arm_iommu_map_page() argument
1369 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_iommu_map_page() argument
1374 int ret, prot, len = PAGE_ALIGN(size + offset); in arm_iommu_map_page()
1376 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) in arm_iommu_map_page()
1377 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_map_page()
1385 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, in arm_iommu_map_page()
1400 * @size: size of buffer (same as passed to dma_map_page)
1406 size_t size, enum dma_data_direction dir, unsigned long attrs) in arm_iommu_unmap_page() argument
1410 struct page *page; in arm_iommu_unmap_page() local
1412 int len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_page()
1417 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { in arm_iommu_unmap_page()
1418 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_unmap_page()
1419 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_unmap_page()
1422 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_page()
1427 * arm_iommu_map_resource - map a device resource for DMA
1430 * @size: size of resource to map
1434 phys_addr_t phys_addr, size_t size, in arm_iommu_map_resource() argument
1442 size_t len = PAGE_ALIGN(size + offset); in arm_iommu_map_resource()
1450 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL); in arm_iommu_map_resource()
1461 * arm_iommu_unmap_resource - unmap a device DMA resource
1464 * @size: size of resource to map
1468 size_t size, enum dma_data_direction dir, in arm_iommu_unmap_resource() argument
1474 size_t len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_resource()
1479 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_resource()
1484 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_cpu() argument
1488 struct page *page; in arm_iommu_sync_single_for_cpu() local
1491 if (dev->dma_coherent || !iova) in arm_iommu_sync_single_for_cpu()
1494 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_cpu()
1495 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_sync_single_for_cpu()
1499 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_device() argument
1503 struct page *page; in arm_iommu_sync_single_for_device() local
1506 if (dev->dma_coherent || !iova) in arm_iommu_sync_single_for_device()
1509 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_device()
1510 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_sync_single_for_device()
1537 * @size: maximum size of the valid IO address space
1547 arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size) in arm_iommu_create_mapping() argument
1549 unsigned int bits = size >> PAGE_SHIFT; in arm_iommu_create_mapping()
1553 int err = -ENOMEM; in arm_iommu_create_mapping()
1555 /* currently only 32-bit DMA address space is supported */ in arm_iommu_create_mapping()
1556 if (size > DMA_BIT_MASK(32) + 1) in arm_iommu_create_mapping()
1557 return ERR_PTR(-ERANGE); in arm_iommu_create_mapping()
1560 return ERR_PTR(-EINVAL); in arm_iommu_create_mapping()
1571 mapping->bitmap_size = bitmap_size; in arm_iommu_create_mapping()
1572 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), in arm_iommu_create_mapping()
1574 if (!mapping->bitmaps) in arm_iommu_create_mapping()
1577 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); in arm_iommu_create_mapping()
1578 if (!mapping->bitmaps[0]) in arm_iommu_create_mapping()
1581 mapping->nr_bitmaps = 1; in arm_iommu_create_mapping()
1582 mapping->extensions = extensions; in arm_iommu_create_mapping()
1583 mapping->base = base; in arm_iommu_create_mapping()
1584 mapping->bits = BITS_PER_BYTE * bitmap_size; in arm_iommu_create_mapping()
1586 spin_lock_init(&mapping->lock); in arm_iommu_create_mapping()
1588 mapping->domain = iommu_paging_domain_alloc(dev); in arm_iommu_create_mapping()
1589 if (IS_ERR(mapping->domain)) { in arm_iommu_create_mapping()
1590 err = PTR_ERR(mapping->domain); in arm_iommu_create_mapping()
1594 kref_init(&mapping->kref); in arm_iommu_create_mapping()
1597 kfree(mapping->bitmaps[0]); in arm_iommu_create_mapping()
1599 kfree(mapping->bitmaps); in arm_iommu_create_mapping()
1613 iommu_domain_free(mapping->domain); in release_iommu_mapping()
1614 for (i = 0; i < mapping->nr_bitmaps; i++) in release_iommu_mapping()
1615 kfree(mapping->bitmaps[i]); in release_iommu_mapping()
1616 kfree(mapping->bitmaps); in release_iommu_mapping()
1624 if (mapping->nr_bitmaps >= mapping->extensions) in extend_iommu_mapping()
1625 return -EINVAL; in extend_iommu_mapping()
1627 next_bitmap = mapping->nr_bitmaps; in extend_iommu_mapping()
1628 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, in extend_iommu_mapping()
1630 if (!mapping->bitmaps[next_bitmap]) in extend_iommu_mapping()
1631 return -ENOMEM; in extend_iommu_mapping()
1633 mapping->nr_bitmaps++; in extend_iommu_mapping()
1641 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_release_mapping()
1650 err = iommu_attach_device(mapping->domain, dev); in __arm_iommu_attach_device()
1654 kref_get(&mapping->kref); in __arm_iommu_attach_device()
1693 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
1705 iommu_detach_device(mapping->domain, dev); in arm_iommu_detach_device()
1706 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_detach_device()
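
The arm_iommu_create_mapping()/arm_iommu_attach_device() pair shown above is also exported for drivers that manage their own IOMMU window; the variant in this file takes the device, an IOVA base and a window size. A hedged sketch with an invented 128 MiB window at 0x80000000 (names and numbers are illustrative only):

#include <asm/dma-iommu.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/sizes.h>

static struct dma_iommu_mapping *demo_mapping;

static int demo_attach(struct device *dev)
{
        int err;

        /* 128 MiB of IOVA space starting at 0x80000000. */
        demo_mapping = arm_iommu_create_mapping(dev, 0x80000000, SZ_128M);
        if (IS_ERR(demo_mapping))
                return PTR_ERR(demo_mapping);

        err = arm_iommu_attach_device(dev, demo_mapping);
        if (err) {
                arm_iommu_release_mapping(demo_mapping);
                return err;
        }
        return 0;
}

static void demo_detach(struct device *dev)
{
        arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(demo_mapping);
}
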
1717 u64 dma_base = 0, size = 1ULL << 32; in arm_setup_iommu_dma_ops() local
1719 if (dev->dma_range_map) { in arm_setup_iommu_dma_ops()
1720 dma_base = dma_range_map_min(dev->dma_range_map); in arm_setup_iommu_dma_ops()
1721 size = dma_range_map_max(dev->dma_range_map) - dma_base; in arm_setup_iommu_dma_ops()
1723 mapping = arm_iommu_create_mapping(dev, dma_base, size); in arm_setup_iommu_dma_ops()
1725 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", in arm_setup_iommu_dma_ops()
1726 size, dev_name(dev)); in arm_setup_iommu_dma_ops()
1764 * Due to legacy code that sets the ->dma_coherent flag from a bus in arch_setup_dma_ops()
1765 * notifier we can't just assign coherent to the ->dma_coherent flag in arch_setup_dma_ops()
1770 dev->dma_coherent = true; in arch_setup_dma_ops()
1777 if (dev->dma_ops) in arch_setup_dma_ops()
1784 dev->archdata.dma_ops_setup = true; in arch_setup_dma_ops()
1789 if (!dev->archdata.dma_ops_setup) in arch_teardown_dma_ops()
1793 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ in arch_teardown_dma_ops()
1797 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, in arch_sync_dma_for_device() argument
1800 __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), in arch_sync_dma_for_device()
1801 size, dir); in arch_sync_dma_for_device()
1804 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, in arch_sync_dma_for_cpu() argument
1807 __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), in arch_sync_dma_for_cpu()
1808 size, dir); in arch_sync_dma_for_cpu()
1811 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, in arch_dma_alloc() argument
1814 return __dma_alloc(dev, size, dma_handle, gfp, in arch_dma_alloc()
1819 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, in arch_dma_free() argument
1822 __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false); in arch_dma_free()